diff --git a/.cargo/config.toml b/.cargo/config.toml index de299a90971e4..5355758f7a4fa 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -12,8 +12,6 @@ rustflags = [ "-Aclippy::if-same-then-else", "-Aclippy::clone-double-ref", "-Dclippy::complexity", - "-Aclippy::clone_on_copy", # Too common - "-Aclippy::needless_lifetimes", # Backward compat? "-Aclippy::zero-prefixed-literal", # 00_1000_000 "-Aclippy::type_complexity", # raison d'etre "-Aclippy::nonminimal-bool", # maybe diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml index cd889b5941989..2633bf55f0789 100644 --- a/.github/workflows/auto-label-issues.yml +++ b/.github/workflows/auto-label-issues.yml @@ -8,10 +8,10 @@ on: jobs: label-new-issues: - runs-on: ubuntu-latest + runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.issue.author_association == 'NONE' with: add-labels: 'Z0-unconfirmed' diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml index f0b8e9b343e29..50539b80b98b7 100644 --- a/.github/workflows/auto-label-prs.yml +++ b/.github/workflows/auto-label-prs.yml @@ -8,13 +8,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.pull_request.draft == true with: add-labels: 'A3-inprogress' remove-labels: 'A0-pleasereview' - name: Label PRs - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.pull_request.draft == false && ! contains(github.event.pull_request.labels.*.name, 'A2-insubstantial') with: add-labels: 'A0-pleasereview' diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml index 4b6b9166be2df..e1387f6da13f7 100644 --- a/.github/workflows/md-link-check.yml +++ b/.github/workflows/md-link-check.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 # v1.0.13 + - uses: gaurav-nelson/github-action-markdown-link-check@0a51127e9955b855a9bbfa1ff5577f1d1338c9a5 # 1.0.14 with: use-quiet-mode: 'yes' config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml index ade8bd4717c39..6bec03d27e7be 100644 --- a/.github/workflows/monthly-tag.yml +++ b/.github/workflows/monthly-tag.yml @@ -32,7 +32,7 @@ jobs: ./scripts/ci/github/generate_changelog.sh ${{ steps.tags.outputs.old }} >> Changelog.md - name: Release snapshot id: release-snapshot - uses: actions/create-release@latest + uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 latest version, repo archived env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml deleted file mode 100644 index 0a5af09358524..0000000000000 --- a/.github/workflows/polkadot-companion-labels.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Check Polkadot Companion and Label - -on: - pull_request: - types: [opened, synchronize] - -jobs: - check_status: - runs-on: ubuntu-latest - steps: - - name: Monitor the status of the gitlab-check-companion-build job - uses: s3krit/await-status-action@v1.0.1 - id: 'check-companion-status' - with: - 
authToken: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.sha }} - contexts: 'continuous-integration/gitlab-check-dependent-polkadot' - timeout: 1800 - notPresentTimeout: 3600 # It can take quite a while before the job starts on Gitlab when the CI queue is large - failureStates: failure - interruptedStates: error # Error = job was probably cancelled. We don't want to label the PR in that case - pollInterval: 30 - - name: Label success - uses: andymckay/labeler@master - if: steps.check-companion-status.outputs.result == 'success' - with: - remove-labels: 'A7-needspolkadotpr' - - name: Label failure - uses: andymckay/labeler@master - if: steps.check-companion-status.outputs.result == 'failure' - with: - add-labels: 'A7-needspolkadotpr' diff --git a/.github/workflows/release-tagging.yml b/.github/workflows/release-tagging.yml index c55fc13a626e0..f7fa913c69709 100644 --- a/.github/workflows/release-tagging.yml +++ b/.github/workflows/release-tagging.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Set Git tag - uses: s3krit/walking-tag-action@master + uses: s3krit/walking-tag-action@d04f7a53b72ceda4e20283736ce3627011275178 # latest version from master with: TAG_NAME: release TAG_MESSAGE: Latest release diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b4933333b3418..765112bfa9e62 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -82,18 +82,31 @@ default: tags: - kubernetes-parity-build -.rust-info-script: &rust-info-script +.rust-info-script: script: - rustup show - cargo --version - rustup +nightly show - cargo +nightly --version +.pipeline-stopper-vars: + script: + - echo "Collecting env variables for the cancel-pipeline job" + - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env + - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env + - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env + +.pipeline-stopper-artifacts: + artifacts: + reports: + dotenv: pipeline-stopper.env + .docker-env: image: "${CI_IMAGE}" before_script: - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] after_script: - !reference [.rusty-cachier, after_script] tags: @@ -196,7 +209,6 @@ include: # publish jobs - scripts/ci/gitlab/pipeline/publish.yml - #### stage: deploy deploy-prometheus-alerting-rules: @@ -223,10 +235,16 @@ deploy-prometheus-alerting-rules: # This job notifies rusty-cachier about the latest commit with the cache. # This info is later used for the cache distribution and an overlay creation. +# Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier. rusty-cachier-notify: stage: notify - extends: .docker-env + extends: .kubernetes-env + variables: + CI_IMAGE: paritytech/rusty-cachier-env:latest + GIT_STRATEGY: none + dependencies: [] script: + - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash - rusty-cachier cache notify #### stage: .post @@ -234,23 +252,61 @@ rusty-cachier-notify: # This job cancels the whole pipeline if any of the provided jobs fail. # In a DAG, every job's chain is executed independently of others. The `fail_fast` principle suggests # to fail the pipeline as soon as possible to shorten the feedback loop.
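The three additions above work together: `.pipeline-stopper-vars` records the failing job's coordinates in `pipeline-stopper.env`, and `.pipeline-stopper-artifacts` publishes that file as a dotenv report, which GitLab turns into ordinary CI variables in any downstream job that `needs` the failed one. A minimal sketch of the mechanism, separate from this pipeline (the job names and the final `echo` are illustrative, not taken from the diff):

```yaml
# A job that fails, leaving behind a dotenv report describing itself.
some-test-job:
  script:
    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
    - exit 1
  artifacts:
    reports:
      dotenv: pipeline-stopper.env  # report artifacts are collected even when the job fails

# Runs only when the needed job fails; FAILED_JOB_URL arrives as a plain variable.
report-failure:
  needs: ["some-test-job"]
  when: on_failure
  script:
    - echo "pipeline stopped because of ${FAILED_JOB_URL}"
```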
-cancel-pipeline: +.cancel-pipeline-template: stage: .post - needs: - - job: test-linux-stable - artifacts: false - - job: test-linux-stable-int - artifacts: false - - job: cargo-check-subkey - artifacts: false - - job: cargo-check-benches - artifacts: false - - job: check-tracing - artifacts: false rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs when: on_failure variables: PROJECT_ID: "${CI_PROJECT_ID}" + PROJECT_NAME: "${CI_PROJECT_NAME}" PIPELINE_ID: "${CI_PIPELINE_ID}" - trigger: "parity/infrastructure/ci_cd/pipeline-stopper" + FAILED_JOB_URL: "${FAILED_JOB_URL}" + FAILED_JOB_NAME: "${FAILED_JOB_NAME}" + PR_NUM: "${PR_NUM}" + trigger: + project: "parity/infrastructure/ci_cd/pipeline-stopper" + # remove branch when pipeline-stopper for polkadot is updated to the same branch + branch: "as-improve" + +# need to copy jobs this way because otherwise GitLab will wait +# for all 3 jobs to finish instead of cancelling if one fails +cancel-pipeline-test-linux-stable1: + extends: .cancel-pipeline-template + needs: + - job: "test-linux-stable 1/3" + +cancel-pipeline-test-linux-stable2: + extends: .cancel-pipeline-template + needs: + - job: "test-linux-stable 2/3" + +cancel-pipeline-test-linux-stable3: + extends: .cancel-pipeline-template + needs: + - job: "test-linux-stable 3/3" + +cancel-pipeline-cargo-check-benches1: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 1/2" + +cancel-pipeline-cargo-check-benches2: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 2/2" + +cancel-pipeline-test-linux-stable-int: + extends: .cancel-pipeline-template + needs: + - job: test-linux-stable-int + +cancel-pipeline-cargo-check-subkey: + extends: .cancel-pipeline-template + needs: + - job: cargo-check-subkey + +cancel-pipeline-check-tracing: + extends: .cancel-pipeline-template + needs: + - job: check-tracing diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 593da06f4a7c0..c6df85194ac99 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -31,7 +31,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for {{pallet}}.
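In the template hunks below, the generated weight functions stop treating `Weight` as a bare integer: base weights and slopes are wrapped in `Weight::from_ref_time`, component multiplication goes through `scalar_saturating_mul`, and DB read/write counts are passed as `RefTimeWeight` (a `u64` alias). A hedged sketch of what the regenerated code looks like for a hypothetical benchmark with one component `s`, two reads, and one write — the constants are placeholders, not real benchmark output:

```rust
use frame_support::weights::{constants::RocksDbWeight, RefTimeWeight, Weight};

// Hypothetical generated weight function; 25_000_000 and 15_000 stand in
// for the base weight and per-component slope the template would emit.
pub fn do_something(s: u32) -> Weight {
    Weight::from_ref_time(25_000_000 as RefTimeWeight)
        // Standard Error: 1_000
        .saturating_add(
            Weight::from_ref_time(15_000 as RefTimeWeight)
                .scalar_saturating_mul(s as RefTimeWeight),
        )
        .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
}
```

Note that the slope is wrapped in `Weight::from_ref_time` while the component stays a scalar: `scalar_saturating_mul` multiplies a `Weight` by a plain `RefTimeWeight` rather than by another `Weight`.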
@@ -64,22 +64,22 @@ impl WeightInfo for SubstrateWeight { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) {{/each}} } {{/each}} @@ -99,22 +99,22 @@ impl WeightInfo for () { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) {{/each}} } {{/each}} diff --git a/Cargo.lock b/Cargo.lock index 523d533f52e00..21ffe555e0542 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,7 +230,7 @@ dependencies = [ "parking", "polling", "slab", - "socket2 0.4.4", + "socket2", "waker-fn", "winapi", ] @@ -310,7 +310,7 @@ 
dependencies = [ "futures-io", "futures-util", "pin-utils", - "socket2 0.4.4", + "socket2", "trust-dns-resolver", ] @@ -343,9 +343,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -382,12 +382,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - [[package]] name = "autocfg" version = "1.0.1" @@ -446,6 +440,7 @@ dependencies = [ name = "beefy-gadget" version = "4.0.0-dev" dependencies = [ + "async-trait", "beefy-primitives", "fnv", "futures", @@ -453,7 +448,8 @@ dependencies = [ "hex", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", + "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-consensus", @@ -495,7 +491,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-rpc", "sc-utils", "serde", @@ -512,7 +508,7 @@ name = "beefy-merkle-tree" version = "4.0.0-dev" dependencies = [ "beefy-primitives", - "env_logger 0.9.0", + "env_logger", "hex", "hex-literal", "log", @@ -856,6 +852,15 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aacacf4d96c24b2ad6eb8ee6df040e4f27b0d0b39a5710c30091baa830485db" +dependencies = [ + "smallvec", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -1005,7 +1010,7 @@ version = "3.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -1021,15 +1026,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - [[package]] name = "cmake" version = "0.1.46" @@ -1041,9 +1037,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "5.0.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b103d85ca6e209388771bfb7aa6b68a7aeec4afbf6f0a0264bfbf50360e5212e" +checksum = "121d8a5b0346092c18a4b2fd6f620d7a06f0eb7ac0a45860939a0884bc579c56" dependencies = [ "strum", "strum_macros", @@ -1801,6 +1797,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403ef3e961ab98f0ba902771d29f842058578bb1ce7e3c59dad5a6a93e784c69" +dependencies = [ + "curve25519-dalek 3.0.2", + "hex", + "rand_core 0.6.2", + "sha2 0.9.8", + "thiserror", + "zeroize", +] + [[package]] name = "either" version = "1.6.1" @@ -1831,7 +1841,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "syn", @@ -1898,19 +1908,6 @@ dependencies = [ 
"syn", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.9.0" @@ -1918,7 +1915,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ "atty", - "humantime 2.1.0", + "humantime", "log", "regex", "termcolor", @@ -2008,11 +2005,11 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +checksum = "21e16290574b39ee41c71aeb90ae960c504ebaf1e2a1c87bd52aa56ed6e1a02f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", ] @@ -2040,7 +2037,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "scale-info", ] @@ -2110,10 +2107,12 @@ dependencies = [ "log", "parity-scale-codec", "paste 1.0.6", + "rusty-fork", "scale-info", "serde", "sp-api", "sp-application-crypto", + "sp-core", "sp-io", "sp-keystore", "sp-runtime", @@ -2276,6 +2275,7 @@ dependencies = [ "serde", "serde_json", "smallvec", + "sp-api", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", @@ -2294,7 +2294,9 @@ name = "frame-support-procedural" version = "4.0.0-dev" dependencies = [ "Inflector", + "cfg-expr", "frame-support-procedural-tools", + "itertools", "proc-macro2", "quote", "syn", @@ -2445,12 +2447,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "2.0.0" @@ -2751,9 +2747,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.9" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -2764,7 +2760,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.6.7", + "tokio-util", "tracing", ] @@ -2814,22 +2810,13 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] -[[package]] -name = "heck" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -2918,13 +2905,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = 
"75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 0.4.8", + "itoa 1.0.1", ] [[package]] @@ -2950,15 +2937,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - [[package]] name = "humantime" version = "2.1.0" @@ -2982,7 +2960,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite 0.2.6", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -3084,7 +3062,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ - "autocfg 1.0.1", + "autocfg", "hashbrown 0.11.2", "serde", ] @@ -3113,6 +3091,12 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" +[[package]] +name = "io-lifetimes" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" + [[package]] name = "ip_network" version = "0.4.1" @@ -3125,7 +3109,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.4.4", + "socket2", "widestring", "winapi", "winreg", @@ -3178,9 +3162,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e017217fcd18da0a25296d3693153dd19c8a6aadab330b3595285d075385d1" +checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-server", @@ -3193,30 +3177,30 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce395539a14d3ad4ec1256fde105abd36a2da25d578a291cabe98f45adfdb111" +checksum = "8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" dependencies = [ "futures-util", "http", "jsonrpsee-core", "jsonrpsee-types", - "pin-project 1.0.10", + "pin-project", "rustls-native-certs", "soketto", "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.1", + "tokio-util", "tracing", "webpki-roots", ] [[package]] name = "jsonrpsee-core" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16efcd4477de857d4a2195a45769b2fe9ebb54f3ef5a4221d3b014a4fe33ec0b" +checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" dependencies = [ "anyhow", "arrayvec 0.7.2", @@ -3227,10 +3211,11 @@ dependencies = [ "futures-timer", "futures-util", "globset", + "http", "hyper", "jsonrpsee-types", "lazy_static", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "rustc-hash", "serde", @@ -3239,14 +3224,15 @@ dependencies = [ "thiserror", "tokio", "tracing", + "tracing-futures", "unicase", ] [[package]] name = "jsonrpsee-http-server" -version = "0.14.0" +version = "0.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03" +checksum = "03802f0373a38c2420c70b5144742d800b509e2937edc4afb116434f07120117" dependencies = [ "futures-channel", "futures-util", @@ -3257,13 +3243,14 @@ dependencies = [ "serde_json", "tokio", "tracing", + "tracing-futures", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874cf3f6a027cebf36cae767feca9aa2e8a8f799880e49eb5540819fcbd8eada" +checksum = "bd67957d4280217247588ac86614ead007b301ca2fa9f19c19f880a536f029e3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -3273,9 +3260,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcf76cd316f5d3ad48138085af1f45e2c58c98e02f0779783dbb034d43f7c86" +checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" dependencies = [ "anyhow", "beef", @@ -3287,10 +3274,11 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee043cb5dd0d51d3eb93432e998d5bae797691a7b10ec4a325e036bcdb48c48a" +checksum = "6ee5feddd5188e62ac08fcf0e56478138e581509d4730f3f7be9b57dd402a4ff" dependencies = [ + "http", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -3298,20 +3286,22 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-server" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2e4d266774a671f8def3794255b28eddd09b18d76e0b913fa439f34588c0a" +checksum = "d488ba74fb369e5ab68926feb75a483458b88e768d44319f37e4ecad283c7325" dependencies = [ "futures-channel", "futures-util", + "http", "jsonrpsee-core", "jsonrpsee-types", "serde_json", "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.1", + "tokio-util", "tracing", + "tracing-futures", ] [[package]] @@ -3343,6 +3333,100 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "kitchensink-runtime" +version = "3.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "node-primitives", + "pallet-alliance", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-bounties", + "pallet-child-bounties", + "pallet-collective", + "pallet-contracts", + "pallet-contracts-primitives", + "pallet-contracts-rpc-runtime-api", + "pallet-conviction-voting", + "pallet-democracy", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-gilt", + "pallet-grandpa", + "pallet-identity", + "pallet-im-online", + "pallet-indices", + "pallet-lottery", + "pallet-membership", + "pallet-mmr", + "pallet-multisig", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-preimage", + "pallet-proxy", + "pallet-randomness-collective-flip", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-remark", + 
"pallet-scheduler", + "pallet-session", + "pallet-session-benchmarking", + "pallet-society", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-state-trie-migration", + "pallet-sudo", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", + "pallet-treasury", + "pallet-uniques", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-sandbox", + "sp-session", + "sp-staking", + "sp-std", + "sp-transaction-pool", + "sp-version", + "static_assertions", + "substrate-wasm-builder", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -3370,7 +3454,7 @@ checksum = "ece7e668abd21387aeb6628130a6f4c802787f014fa46bc83221448322250357" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", ] [[package]] @@ -3385,7 +3469,7 @@ dependencies = [ "num_cpus", "owning_ref", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "regex", "rocksdb", "smallvec", @@ -3411,9 +3495,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.121" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libgit2-sys" @@ -3455,9 +3539,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.45.1" +version = "0.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +checksum = "81327106887e42d004fbdab1fef93675be2e2e07c1b95fce45e2cc813485611d" dependencies = [ "bytes", "futures", @@ -3466,7 +3550,7 @@ dependencies = [ "instant", "lazy_static", "libp2p-autonat", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -3491,70 +3575,36 @@ dependencies = [ "libp2p-websocket", "libp2p-yamux", "multiaddr", - "parking_lot 0.12.0", - "pin-project 1.0.10", + "parking_lot 0.12.1", + "pin-project", "rand 0.7.3", "smallvec", ] [[package]] name = "libp2p-autonat" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50de7c1d5c3f040fccb469e8a2d189e068b7627d760dd74ef914071c16bbe905" +checksum = "4decc51f3573653a9f4ecacb31b1b922dd20c25a6322bb15318ec04287ec46f9" dependencies = [ "async-trait", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-request-response", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.4", ] [[package]] name = "libp2p-core" -version = "0.32.1" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db5b02602099fb75cb2d16f9ea860a320d6eb82ce41e95ab680912c454805cd5" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "log", - "multiaddr", - "multihash", - "multistream-select", - "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.9.0", - "prost-build 0.9.0", - "rand 0.8.4", - "ring", - 
"rw-stream-sink 0.2.1", - "sha2 0.10.2", - "smallvec", - "thiserror", - "unsigned-varint", - "void", - "zeroize", -] - -[[package]] -name = "libp2p-core" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +checksum = "fbf9b94cefab7599b2d3dff2f93bee218c6621d68590b23ede4485813cbcece6" dependencies = [ "asn1_der", "bs58", @@ -3570,13 +3620,13 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "parking_lot 0.12.1", + "pin-project", + "prost", + "prost-build", "rand 0.8.4", "ring", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "sha2 0.10.2", "smallvec", "thiserror", @@ -3587,53 +3637,53 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86adefc55ea4ed8201149f052fb441210727481dff1fb0b8318460206a79f5fb" +checksum = "d0183dc2a3da1fbbf85e5b6cf51217f55b14f5daea0c455a9536eef646bfec71" dependencies = [ "flate2", "futures", - "libp2p-core 0.33.0", + "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +checksum = "6cbf54723250fa5d521383be789bf60efdabe6bacfb443f87da261019a49b4b5" dependencies = [ "async-std-resolver", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-floodsub" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a505d0c6f851cbf2919535150198e530825def8bd3757477f13dc3a57f46cbcc" +checksum = "98a4b6ffd53e355775d24b76f583fdda54b3284806f678499b57913adb94f231" dependencies = [ "cuckoofilter", "fnv", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "smallvec", ] [[package]] name = "libp2p-gossipsub" -version = "0.38.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be947d8cea8e6b469201314619395826896d2c051053c3723910ba98e68e04" +checksum = "74b4b888cfbeb1f5551acd3aa1366e01bf88ede26cc3c4645d0d2d004d5ca7b0" dependencies = [ "asynchronous-codec", "base64", @@ -3643,12 +3693,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "prometheus-client", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3659,19 +3709,19 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +checksum = "c50b585518f8efd06f93ac2f976bd672e17cdac794644b3117edd078e96bda06" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "lru", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3680,9 +3730,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.37.1" +version = "0.38.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6b5d4de90fcd35feb65ea6223fd78f3b747a64ca4b65e0813fbe66a27d56aa" +checksum = "740862893bb5f06ac24acc9d49bdeadc3a5e52e51818a30a25c1f3519da2c851" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -3692,11 +3742,11 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "sha2 0.10.2", "smallvec", @@ -3708,9 +3758,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783f8cf00c7b6c1ff0f1870b4fcf50b042b45533d2e13b6fb464caf447a6951" +checksum = "66e5e5919509603281033fd16306c61df7a4428ce274b67af5e14b07de5cdcb2" dependencies = [ "async-io", "data-encoding", @@ -3718,22 +3768,22 @@ dependencies = [ "futures", "if-watch", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.8.4", "smallvec", - "socket2 0.4.4", + "socket2", "void", ] [[package]] name = "libp2p-metrics" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" +checksum = "ef8aff4a1abef42328fbb30b17c853fff9be986dc39af17ee39f9c5f755c5e0c" dependencies = [ - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", @@ -3745,17 +3795,17 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +checksum = "61fd1b20638ec209c5075dfb2e8ce6a7ea4ec3cd3ad7b77f7a477c06d53322e2" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "smallvec", "unsigned-varint", @@ -3763,18 +3813,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +checksum = "762408cb5d84b49a600422d7f9a42c18012d8da6ebcd570f9a4a4290ba41fb6f" dependencies = [ "bytes", "curve25519-dalek 3.0.2", "futures", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.4", "sha2 0.10.2", "snow", @@ -3785,14 +3835,14 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41516c82fe8dd148ec925eead0c5ec08a0628f7913597e93e126e4dfb4e0787" +checksum = "100a6934ae1dbf8a693a4e7dd1d730fd60b774dafc45688ed63b554497c6c925" dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.7.3", @@ -3801,17 +3851,17 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +checksum = "be27bf0820a6238a4e06365b096d428271cce85a129cf16f2fe9eb1610c4df86" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + 
"libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "unsigned-varint", "void", ] @@ -3824,7 +3874,7 @@ checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ "futures", "log", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "salsa20", "sha3 0.9.1", @@ -3832,9 +3882,9 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624ead3406f64437a0d4567c31bd128a9a0b8226d5f16c074038f5d0fc32f650" +checksum = "4931547ee0cce03971ccc1733ff05bb0c4349fd89120a39e9861e2bbe18843c3" dependencies = [ "asynchronous-codec", "bytes", @@ -3842,12 +3892,12 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "pin-project", + "prost", + "prost-build", "prost-codec", "rand 0.8.4", "smallvec", @@ -3858,20 +3908,20 @@ dependencies = [ [[package]] name = "libp2p-rendezvous" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59967ea2db2c7560f641aa58ac05982d42131863fcd3dd6dcf0dd1daf81c60c" +checksum = "9511c9672ba33284838e349623319c8cad2d18cfad243ae46c6b7e8a2982ea4e" dependencies = [ "asynchronous-codec", "bimap", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.4", "sha2 0.10.2", "thiserror", @@ -3881,15 +3931,15 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02e0acb725e5a757d77c96b95298fd73a7394fe82ba7b8bbeea510719cbe441" +checksum = "508a189e2795d892c8f5c1fa1e9e0b1845d32d7b0b249dbf7b05b18811361843" dependencies = [ "async-trait", "bytes", "futures", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.7.3", @@ -3899,18 +3949,18 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.36.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +checksum = "95ac5be6c2de2d1ff3f7693fda6faf8a827b1f3e808202277783fea9f527d114" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "smallvec", "thiserror", @@ -3919,9 +3969,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf2fe8c80b43561355f4d51875273b5b6dfbac37952e8f64b1270769305c9d7" +checksum = "9f54a64b6957249e0ce782f8abf41d97f69330d02bf229f0672d864f0650cc76" dependencies = [ "quote", "syn", @@ -3929,9 +3979,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +checksum = "8a6771dc19aa3c65d6af9a8c65222bfc8fcd446630ddca487acd161fa6096f3b" dependencies = [ "async-io", "futures", @@ -3939,32 +3989,32 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "socket2 0.4.4", + "socket2", ] 
[[package]] name = "libp2p-uds" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24bdab114f7f2701757d6541266e1131b429bbae382008f207f2114ee4222dcb" +checksum = "d125e3e5f0d58f3c6ac21815b20cf4b6a88b8db9dc26368ea821838f4161fd4d" dependencies = [ "async-std", "futures", - "libp2p-core 0.32.1", + "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f066f2b8b1a1d64793f05da2256e6842ecd0293d6735ca2e9bda89831a1bdc06" +checksum = "ec894790eec3c1608f8d1a8a0bdf0dbeb79ed4de2dce964222011c2896dfa05a" dependencies = [ "futures", "js-sys", - "libp2p-core 0.33.0", + "libp2p-core", "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", @@ -3972,18 +4022,18 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +checksum = "9808e57e81be76ff841c106b4c5974fb4d41a233a7bdd2afbf1687ac6def3818" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "quicksink", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "soketto", "url", "webpki-roots", @@ -3991,13 +4041,13 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +checksum = "c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" dependencies = [ "futures", - "libp2p-core 0.33.0", - "parking_lot 0.12.0", + "libp2p-core", + "parking_lot 0.12.1", "thiserror", "yamux", ] @@ -4108,6 +4158,12 @@ version = "0.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5284f00d480e1c39af34e72f8ad60b94f47007e3481cd3b731c1d67190ddc7b7" +[[package]] +name = "linux-raw-sys" +version = "0.0.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" + [[package]] name = "lite-json" version = "0.1.3" @@ -4298,7 +4354,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ - "autocfg 1.0.1", + "autocfg", ] [[package]] @@ -4308,7 +4364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" dependencies = [ "hash-db", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "parity-util-mem", ] @@ -4343,30 +4399,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", - "autocfg 1.0.1", + "autocfg", ] [[package]] name = "mio" -version = "0.8.0" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2 0.3.19", - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.36.1", ] [[package]] @@ -4450,7 +4495,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.10", + "pin-project", "smallvec", "unsigned-varint", ] @@ -4594,12 +4639,12 @@ dependencies = [ "futures", "hash-db", "hex", + "kitchensink-runtime", "kvdb", "kvdb-rocksdb", "lazy_static", "log", "node-primitives", - "node-runtime", "node-testing", "parity-db", "parity-util-mem", @@ -4636,13 +4681,13 @@ dependencies = [ "futures", "hex-literal", "jsonrpsee", + "kitchensink-runtime", "log", "nix 0.23.1", "node-executor", "node-inspect", "node-primitives", "node-rpc", - "node-runtime", "pallet-asset-tx-payment", "pallet-balances", "pallet-im-online", @@ -4669,6 +4714,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-rpc", "sc-service", "sc-service-test", @@ -4714,12 +4760,13 @@ dependencies = [ "frame-support", "frame-system", "futures", + "kitchensink-runtime", "node-primitives", - "node-runtime", "node-testing", "pallet-balances", "pallet-contracts", "pallet-im-online", + "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", @@ -4798,106 +4845,13 @@ dependencies = [ "substrate-state-trie-migration-rpc", ] -[[package]] -name = "node-runtime" -version = "3.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "node-primitives", - "pallet-alliance", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-bounties", - "pallet-child-bounties", - "pallet-collective", - "pallet-contracts", - "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", - "pallet-conviction-voting", - "pallet-democracy", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - "pallet-elections-phragmen", - "pallet-gilt", - "pallet-grandpa", - "pallet-identity", - "pallet-im-online", - "pallet-indices", - "pallet-lottery", - "pallet-membership", - "pallet-mmr", - "pallet-multisig", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-preimage", - "pallet-proxy", - "pallet-randomness-collective-flip", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-remark", - "pallet-scheduler", - "pallet-session", - "pallet-session-benchmarking", - "pallet-society", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-state-trie-migration", - "pallet-sudo", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-transaction-storage", - "pallet-treasury", - "pallet-uniques", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-core", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-sandbox", - "sp-session", - "sp-staking", - "sp-std", - "sp-transaction-pool", - "sp-version", - "static_assertions", - "substrate-wasm-builder", -] - [[package]] name = 
"node-runtime-generate-bags" version = "3.0.0" dependencies = [ "clap 3.1.18", "generate-bags", - "node-runtime", + "kitchensink-runtime", ] [[package]] @@ -4986,10 +4940,10 @@ dependencies = [ "frame-system", "fs_extra", "futures", + "kitchensink-runtime", "log", "node-executor", "node-primitives", - "node-runtime", "pallet-asset-tx-payment", "pallet-transaction-payment", "parity-scale-codec", @@ -5036,22 +4990,13 @@ dependencies = [ "version_check", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-integer", "num-traits", ] @@ -5081,7 +5026,7 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-traits", ] @@ -5091,7 +5036,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-bigint", "num-integer", "num-traits", @@ -5103,7 +5048,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-integer", "num-traits", ] @@ -5114,7 +5059,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.1", + "autocfg", "libm", ] @@ -5521,11 +5466,12 @@ version = "4.0.0-dev" dependencies = [ "assert_matches", "bitflags", - "env_logger 0.9.0", + "env_logger", "frame-benchmarking", "frame-support", "frame-system", "hex-literal", + "impl-trait-for-tuples", "log", "pallet-balances", "pallet-contracts-primitives", @@ -5654,7 +5600,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -5696,6 +5642,7 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-std", + "sp-tracing", "substrate-test-utils", ] @@ -5887,11 +5834,12 @@ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ "ckb-merkle-mountain-range", - "env_logger 0.9.0", + "env_logger", "frame-benchmarking", "frame-support", "frame-system", "hex-literal", + "itertools", "parity-scale-codec", "scale-info", "sp-core", @@ -6003,6 +5951,15 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nomination-pools-runtime-api" +version = "1.0.0-dev" +dependencies = [ + "parity-scale-codec", + "sp-api", + "sp-std", +] + [[package]] name = "pallet-nomination-pools-test-staking" version = "1.0.0" @@ -6342,7 +6299,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "remote-externalities", "scale-info", "serde", @@ -6573,9 +6530,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.13" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"55a7901b85874402471e131de3332dde0e51f38432c69a3853627c8e25433048" +checksum = "2bb474d0ed0836e185cb998a6b140ed1073d1fbf27d690ecf9ede8030289382c" dependencies = [ "blake2-rfc", "crc32fast", @@ -6599,6 +6556,7 @@ dependencies = [ "arrayvec 0.7.2", "bitvec", "byte-slice-cast", + "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", "serde", @@ -6629,10 +6587,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "primitive-types", "smallvec", "winapi", @@ -6683,9 +6641,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core 0.9.1", @@ -6715,7 +6673,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.32.0", ] [[package]] @@ -6826,33 +6784,13 @@ dependencies = [ "indexmap", ] -[[package]] -name = "pin-project" -version = "0.4.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" -dependencies = [ - "pin-project-internal 0.4.29", -] - [[package]] name = "pin-project" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 1.0.10", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] @@ -7100,48 +7038,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" dependencies = [ "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes", - "prost-derive 0.9.0", -] - -[[package]] -name = "prost" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" -dependencies = [ - "bytes", - "prost-derive 0.10.1", + "quote", + "syn", ] [[package]] -name = "prost-build" -version = "0.9.0" +name = "prost" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" dependencies = [ "bytes", - "heck 0.3.2", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost 0.9.0", - "prost-types 0.9.0", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -7153,14 +7061,14 @@ dependencies = [ "bytes", "cfg-if 1.0.0", "cmake", - "heck 0.4.0", + "heck", "itertools", "lazy_static", "log", "multimap", 
"petgraph", - "prost 0.10.3", - "prost-types 0.10.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -7174,24 +7082,11 @@ checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.3", + "prost", "thiserror", "unsigned-varint", ] -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.10.1" @@ -7205,16 +7100,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost 0.9.0", -] - [[package]] name = "prost-types" version = "0.10.1" @@ -7222,7 +7107,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", - "prost 0.10.3", + "prost", ] [[package]] @@ -7301,25 +7186,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg 0.1.2", - "rand_xorshift", - "winapi", -] - [[package]] name = "rand" version = "0.7.3" @@ -7346,16 +7212,6 @@ dependencies = [ "rand_hc 0.3.0", ] -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", -] - [[package]] name = "rand_chacha" version = "0.2.2" @@ -7376,21 +7232,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" @@ -7419,15 +7260,6 @@ dependencies = [ "rand 0.8.4", ] -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rand_hc" version = "0.2.0" @@ -7446,50 +7278,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - [[package]] name = "rand_pcg" version = "0.2.1" @@ -7508,15 +7296,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -7529,7 +7308,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ - "autocfg 1.0.1", + "autocfg", "crossbeam-deque", "either", "rayon-core", @@ -7548,15 +7327,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "redox_syscall" version = "0.2.10" @@ -7674,7 +7444,7 @@ dependencies = [ name = "remote-externalities" version = "0.10.0-dev" dependencies = [ - "env_logger 0.9.0", + "env_logger", "frame-support", "jsonrpsee", "log", @@ -7717,12 +7487,6 @@ dependencies = [ "quick-error 1.2.3", ] -[[package]] -name = "retain_mut" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "448296241d034b96c11173591deaa1302f2c17b56092106c1f92c1bc0183a8c9" - [[package]] name = "rfc6979" version = "0.1.0" @@ -7756,7 +7520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f08c8062c1fe1253064043b8fc07bfea1b9702b71b4a86c11ea3588183b12e1" dependencies = [ "bytecheck", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", @@ -7786,9 +7550,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "5.0.1" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" +checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" dependencies = [ "libc", "winapi", @@ -7853,12 +7617,26 @@ checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" dependencies = [ "bitflags", "errno", - "io-lifetimes", + "io-lifetimes 0.5.3", "libc", - "linux-raw-sys", + "linux-raw-sys 0.0.42", "winapi", ] +[[package]] +name = "rustix" +version = "0.35.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef258c11e17f5c01979a10543a30a4e12faef6aab217a74266e747eefa3aed88" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes 0.7.2", + "libc", + "linux-raw-sys 0.0.46", + "windows-sys 0.36.1", +] + [[package]] name = "rustls" version = "0.20.2" @@ 
-7899,14 +7677,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] -name = "rw-stream-sink" -version = "0.2.1" +name = "rusty-fork" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ - "futures", - "pin-project 0.4.29", - "static_assertions", + "fnv", + "quick-error 1.2.3", + "tempfile", ] [[package]] @@ -7916,7 +7694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.10", + "pin-project", "static_assertions", ] @@ -7967,19 +7745,18 @@ dependencies = [ name = "sc-authority-discovery" version = "0.10.0-dev" dependencies = [ - "async-trait", "futures", "futures-timer", "ip_network", "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-network", + "sc-network-common", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -8000,7 +7777,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8041,7 +7818,7 @@ dependencies = [ "memmap2 0.5.0", "parity-scale-codec", "sc-chain-spec-derive", - "sc-network", + "sc-network-common", "sc-telemetry", "serde", "serde_json", @@ -8107,7 +7884,7 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -8132,7 +7909,9 @@ dependencies = [ name = "sc-client-db" version = "0.10.0-dev" dependencies = [ + "criterion", "hash-db", + "kitchensink-runtime", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", @@ -8140,8 +7919,9 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "quickcheck", + "rand 0.8.4", "sc-client-api", "sc-state-db", "sp-arithmetic", @@ -8165,7 +7945,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sc-utils", "serde", @@ -8188,7 +7968,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -8230,10 +8010,9 @@ dependencies = [ "num-rational 0.2.4", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", - "retain_mut", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -8353,7 +8132,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sc-consensus", "sp-api", @@ -8408,13 +8187,13 @@ name = "sc-executor" version = "0.10.0-dev" dependencies = [ "criterion", - "env_logger 0.9.0", + "env_logger", "hex-literal", "lazy_static", "lru", "num_cpus", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "paste 1.0.6", "regex", "sc-executor-common", @@ -8453,7 +8232,6 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", - "sp-serializer", "sp-wasm-interface", "thiserror", "wasm-instrument", @@ -8482,9 +8260,12 @@ dependencies = [ "cfg-if 1.0.0", "libc", 
"log", + "once_cell", "parity-scale-codec", "parity-wasm 0.42.2", "paste 1.0.6", + "rustix 0.33.7", + "rustix 0.35.6", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -8512,7 +8293,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "sc-block-builder", "sc-chain-spec", @@ -8520,6 +8301,7 @@ dependencies = [ "sc-consensus", "sc-keystore", "sc-network", + "sc-network-common", "sc-network-gossip", "sc-network-test", "sc-telemetry", @@ -8579,7 +8361,7 @@ dependencies = [ "log", "parity-util-mem", "sc-client-api", - "sc-network", + "sc-network-common", "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", @@ -8591,7 +8373,7 @@ version = "4.0.0-dev" dependencies = [ "async-trait", "hex", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "serde_json", "sp-application-crypto", "sp-core", @@ -8624,10 +8406,10 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "parking_lot 0.12.1", + "pin-project", + "prost", + "prost-build", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8644,7 +8426,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-finality-grandpa", "sp-runtime", "sp-test-primitives", "sp-tracing", @@ -8662,12 +8443,21 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ + "async-trait", + "bitflags", + "bytes", "futures", "libp2p", "parity-scale-codec", - "prost-build 0.10.4", + "prost-build", + "sc-consensus", "sc-peerset", + "serde", "smallvec", + "sp-consensus", + "sp-finality-grandpa", + "sp-runtime", + "thiserror", ] [[package]] @@ -8682,7 +8472,8 @@ dependencies = [ "log", "lru", "quickcheck", - "sc-network", + "sc-network-common", + "sc-peerset", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -8694,11 +8485,12 @@ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ "futures", + "hex", "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8712,16 +8504,15 @@ dependencies = [ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ - "bitflags", - "either", "fork-tree", "futures", + "hex", "libp2p", "log", "lru", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "quickcheck", "sc-block-builder", "sc-client-api", @@ -8751,13 +8542,15 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "sc-block-builder", "sc-client-api", "sc-consensus", "sc-network", "sc-network-common", + "sc-network-light", + "sc-network-sync", "sc-service", "sp-blockchain", "sp-consensus", @@ -8781,15 +8574,17 @@ dependencies = [ "hyper", "hyper-rustls", "lazy_static", + "libp2p", "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "sc-block-builder", "sc-client-api", "sc-client-db", - "sc-network", + "sc-network-common", + "sc-peerset", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", @@ -8831,18 +8626,19 @@ name = "sc-rpc" version = "4.0.0-dev" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger", "futures", "hash-db", "jsonrpsee", "lazy_static", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-network", + "sc-network-common", "sc-rpc-api", 
"sc-tracing", "sc-transaction-pool", @@ -8872,7 +8668,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-chain-spec", "sc-transaction-pool-api", "scale-info", @@ -8927,8 +8723,8 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", - "pin-project 1.0.10", + "parking_lot 0.12.1", + "pin-project", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -8940,6 +8736,8 @@ dependencies = [ "sc-keystore", "sc-network", "sc-network-common", + "sc-network-light", + "sc-network-sync", "sc-offchain", "sc-rpc", "sc-rpc-server", @@ -8989,13 +8787,14 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-client-db", "sc-consensus", "sc-executor", "sc-network", + "sc-network-common", "sc-service", "sc-transaction-pool-api", "sp-api", @@ -9023,7 +8822,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sp-core", ] @@ -9072,8 +8871,8 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.0", - "pin-project 1.0.10", + "parking_lot 0.12.1", + "pin-project", "rand 0.7.3", "serde", "serde_json", @@ -9093,7 +8892,7 @@ dependencies = [ "libc", "log", "once_cell", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "regex", "rustc-hash", "sc-client-api", @@ -9135,8 +8934,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", - "retain_mut", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", @@ -9176,7 +8974,7 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "prometheus", "tokio-test", ] @@ -9271,19 +9069,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.21.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7883017d5b21f011ef8040ea9c6c7ac90834c0df26a69e4c0b06276151f125" +checksum = "b7649a0b3ffb32636e60c7ce0d70511eda9c52c658cd0634e194d5a19943aeff" dependencies = [ - "rand 0.6.5", "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.4.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" +checksum = "7058dc8eaf3f2810d7828680320acda0b25a288f6d288e19278e249bbf74226b" dependencies = [ "cc", ] @@ -9394,9 +9191,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa 1.0.1", "ryu", @@ -9592,17 +9389,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.4.4" @@ -9642,6 +9428,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", + "sp-trie", "sp-version", "thiserror", ] @@ -9772,7 +9559,7 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sp-api", "sp-consensus", 
"sp-database", @@ -9885,7 +9672,7 @@ dependencies = [ "byteorder", "criterion", "dyn-clonable", - "ed25519-dalek", + "ed25519-zebra", "futures", "hash-db", "hash256-std-hasher", @@ -9899,7 +9686,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "primitive-types", "rand 0.7.3", "regex", @@ -9953,7 +9740,7 @@ name = "sp-database" version = "4.0.0-dev" dependencies = [ "kvdb", - "parking_lot 0.12.0", + "parking_lot 0.12.1", ] [[package]] @@ -10010,12 +9797,13 @@ dependencies = [ name = "sp-io" version = "6.0.0" dependencies = [ + "bytes", "futures", "hash-db", "libsecp256k1", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "secp256k1", "sp-core", "sp-externalities", @@ -10048,7 +9836,7 @@ dependencies = [ "futures", "merlin", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", @@ -10168,6 +9956,7 @@ dependencies = [ name = "sp-runtime-interface" version = "6.0.0" dependencies = [ + "bytes", "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", @@ -10217,6 +10006,7 @@ dependencies = [ name = "sp-runtime-interface-test-wasm" version = "2.0.0" dependencies = [ + "bytes", "sp-core", "sp-io", "sp-runtime-interface", @@ -10291,7 +10081,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pretty_assertions", "rand 0.7.3", "smallvec", @@ -10303,6 +10093,7 @@ dependencies = [ "sp-trie", "thiserror", "tracing", + "trie-db", "trie-root", ] @@ -10400,16 +10191,23 @@ dependencies = [ name = "sp-trie" version = "6.0.0" dependencies = [ + "ahash", "criterion", "hash-db", + "hashbrown 0.12.3", "hex-literal", + "lazy_static", + "lru", "memory-db", + "nohash-hasher", "parity-scale-codec", + "parking_lot 0.12.1", "scale-info", "sp-core", "sp-runtime", "sp-std", "thiserror", + "tracing", "trie-bench", "trie-db", "trie-root", @@ -10509,20 +10307,20 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae14b91c7d11c9a851d3fbc80a963198998c2a64eec840477fa92d8ce9b70bb" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.23.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb0dc7ee9c15cea6199cde9a127fa16a4c5819af85395457ad72d68edc85a38" +checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ - "heck 0.3.2", + "heck", "proc-macro2", "quote", "rustversion", @@ -10740,7 +10538,7 @@ version = "2.0.0" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-transaction-pool", "sc-transaction-pool-api", "sp-blockchain", @@ -11026,10 +10824,10 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project-lite 0.2.6", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -11080,20 +10878,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-util" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - 
"log", - "pin-project-lite 0.2.6", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.1" @@ -11106,6 +10890,7 @@ dependencies = [ "futures-sink", "pin-project-lite 0.2.6", "tokio", + "tracing", ] [[package]] @@ -11125,9 +10910,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if 1.0.0", "log", @@ -11138,9 +10923,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -11149,11 +10934,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -11163,7 +10948,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.10", + "pin-project", "tracing", ] @@ -11173,10 +10958,8 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ - "ahash", "lazy_static", "log", - "lru", "tracing-core", ] @@ -11221,9 +11004,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ecec5d10427b35e9ae374b059dccc0801d02d832617c04c78afc7a8c5c4a34" +checksum = "c5704f0d6130bd83608e4370c19e20c8a6ec03e80363e493d0234efca005265a" dependencies = [ "criterion", "hash-db", @@ -11237,12 +11020,12 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" +checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908" dependencies = [ "hash-db", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "log", "rustc-hex", "smallvec", @@ -11303,7 +11086,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", @@ -11431,12 +11214,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" - [[package]] name = "unicode-width" version = "0.1.8" @@ -11577,6 +11354,12 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = 
"wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.77" @@ -11975,7 +11758,7 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.33.7", "serde", "sha2 0.9.8", "toml", @@ -12041,7 +11824,7 @@ dependencies = [ "object 0.28.3", "region 2.2.0", "rustc-demangle", - "rustix", + "rustix 0.33.7", "serde", "target-lexicon", "thiserror", @@ -12059,7 +11842,7 @@ checksum = "e6d5dd480cc6dc0a401653e45b79796a3317f8228990d84bc2271bdaf0810071" dependencies = [ "lazy_static", "object 0.28.3", - "rustix", + "rustix 0.33.7", ] [[package]] @@ -12081,7 +11864,7 @@ dependencies = [ "more-asserts", "rand 0.8.4", "region 2.2.0", - "rustix", + "rustix 0.33.7", "thiserror", "wasmtime-environ", "wasmtime-jit-debug", @@ -12229,6 +12012,19 @@ dependencies = [ "windows_x86_64_msvc 0.32.0", ] +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + [[package]] name = "windows_aarch64_msvc" version = "0.29.0" @@ -12241,6 +12037,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + [[package]] name = "windows_i686_gnu" version = "0.29.0" @@ -12253,6 +12055,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + [[package]] name = "windows_i686_msvc" version = "0.29.0" @@ -12265,6 +12073,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + [[package]] name = "windows_x86_64_gnu" version = "0.29.0" @@ -12277,6 +12091,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + [[package]] name = "windows_x86_64_msvc" version = "0.29.0" @@ -12289,6 +12109,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.7.0" @@ -12327,7 +12153,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index 9909e6f893877..e2907716ca9f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ members = [ "frame/nomination-pools", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", + "frame/nomination-pools/runtime-api", "frame/randomness-collective-flip", "frame/ranked-collective", "frame/recovery", @@ -257,7 +258,7 @@ crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } -ed25519-dalek = { opt-level = 3 } +ed25519-zebra = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/README.md b/README.md index b716794428a00..c609641af7ce2 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Substrate is a next-generation framework for blockchain innovation 🚀. ## Trying it out Simply go to [docs.substrate.io](https://docs.substrate.io) and follow the -[installation](https://docs.substrate.io/v3/getting-started/overview) instructions. You can +[installation](https://docs.substrate.io/main-docs/install/) instructions. You can also try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Contributions & Code of Conduct diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 8defb870fa1b0..0f6fd9450aeee 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -114,7 +114,7 @@ local node template. ### Multi-Node Local Testnet If you want to see the multi-node consensus algorithm in action, refer to our -[Start a Private Network tutorial](https://docs.substrate.io/tutorials/v3/private-network). +[Simulate a network tutorial](https://docs.substrate.io/tutorials/get-started/simulate-network/). ## Template Structure @@ -129,7 +129,7 @@ Substrate-based blockchain nodes expose a number of capabilities: - Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the nodes in the network to communicate with one another. - Consensus: Blockchains must have a way to come to - [consensus](https://docs.substrate.io/v3/advanced/consensus) on the state of the + [consensus](https://docs.substrate.io/main-docs/fundamentals/consensus/) on the state of the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). @@ -138,22 +138,20 @@ Substrate-based blockchain nodes expose a number of capabilities: There are several files in the `node` directory - take special note of the following: - [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://docs.substrate.io/v3/runtime/chain-specs) is a + [chain specification](https://docs.substrate.io/main-docs/build/chain-spec/) is a source code file that defines a Substrate chain's initial (genesis) state. Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. 
Take note of the `development_config` and `testnet_genesis` functions, which are used to define the genesis state for the local development chain configuration. These functions identify some - [well-known accounts](https://docs.substrate.io/v3/tools/subkey#well-known-keys) + [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and use them to configure the blockchain's initial state. - [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of the libraries that this file imports and the names of the functions it invokes. In particular, there are references to consensus-related topics, such as the - [longest chain rule](https://docs.substrate.io/v3/advanced/consensus#longest-chain-rule), - the [Aura](https://docs.substrate.io/v3/advanced/consensus#aura) block authoring - mechanism and the - [GRANDPA](https://docs.substrate.io/v3/advanced/consensus#grandpa) finality - gadget. + [block finalization and forks](https://docs.substrate.io/main-docs/fundamentals/consensus/#finalization-and-forks) + and other [consensus mechanisms](https://docs.substrate.io/main-docs/fundamentals/consensus/#default-consensus-models) + such as Aura for block authoring and GRANDPA for finality. After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -165,16 +163,15 @@ capabilities and configuration parameters that it exposes: ### Runtime In Substrate, the terms -"[runtime](https://docs.substrate.io/v3/getting-started/glossary#runtime)" and -"[state transition function](https://docs.substrate.io/v3/getting-started/glossary#state-transition-function-stf)" +"runtime" and "state transition function" are analogous - they refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. The Substrate project in this repository uses -the [FRAME](https://docs.substrate.io/v3/runtime/frame) framework to construct a +[FRAME](https://docs.substrate.io/main-docs/fundamentals/runtime-intro/#frame) to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful -[macro language](https://docs.substrate.io/v3/runtime/macros) that makes it easy to +[macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to create pallets and flexibly compose them to create blockchains that can address -[a variety of needs](https://www.substrate.io/substrate-users/). +[a variety of needs](https://substrate.io/ecosystem/projects/). Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: @@ -184,8 +181,7 @@ the following: - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core - [FRAME Support](https://docs.substrate.io/v3/runtime/frame#support-crate) - library. + FRAME Support [system](https://docs.substrate.io/reference/frame-pallets/#system-pallets) library. 
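As a rough, hedged sketch of what that composition looks like in practice, here is an abbreviated form of the `construct_runtime!` invocation from `runtime/src/lib.rs` as it appears later in this diff (the real invocation lists many more pallets):

```rust
// Abbreviated sketch; see runtime/src/lib.rs in this diff for the full list
// of pallets and their accompanying `Config` implementations.
construct_runtime!(
    pub struct Runtime
    where
        Block = Block,
        NodeBlock = opaque::Block,
        UncheckedExtrinsic = UncheckedExtrinsic,
    {
        System: frame_system,
        RandomnessCollectiveFlip: pallet_randomness_collective_flip,
        TemplateModule: pallet_template,
    }
);
```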
### Pallets @@ -196,12 +192,12 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is comprised of a number of blockchain primitives: - Storage: FRAME defines a rich set of powerful [storage abstractions](https://docs.substrate.io/v3/runtime/storage) that makes + [storage abstractions](https://docs.substrate.io/main-docs/build/runtime-storage/) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain. - Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state. -- Events: Substrate uses [events and errors](https://docs.substrate.io/v3/runtime/events-and-errors) +- Events: Substrate uses [events and errors](https://docs.substrate.io/main-docs/build/events-errors/) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. - Config: The `Config` configuration interface is used to define the types and parameters upon diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml index cfc4437bbae41..bc1922f47d963 100644 --- a/bin/node-template/docker-compose.yml +++ b/bin/node-template/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.2" services: dev: container_name: node-template - image: paritytech/ci-linux:974ba3ac-20201006 + image: paritytech/ci-linux:production working_dir: /var/www/node-template ports: - "9944:9944" diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index ea133ca847af7..2755966e3ae0f 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -3,7 +3,7 @@ title: Installation --- This guide is for reference only; please check the latest information on getting started with Substrate -[here](https://docs.substrate.io/v3/getting-started/installation/). +[here](https://docs.substrate.io/main-docs/install/). This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first @@ -73,11 +73,11 @@ brew install openssl ### Windows -**_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! It is _highly_ +**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_ recommended to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). Please refer to the separate -[guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). +[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/). 
## Rust developer environment diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index c8e74ea9515ac..eeba198da8212 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -42,7 +42,7 @@ frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/bin/node-template/node/src/command_helper.rs b/bin/node-template/node/src/benchmarking.rs similarity index 71% rename from bin/node-template/node/src/command_helper.rs rename to bin/node-template/node/src/benchmarking.rs index 287e81b1e96bd..f0e32104cd3ee 100644 --- a/bin/node-template/node/src/command_helper.rs +++ b/bin/node-template/node/src/benchmarking.rs @@ -16,13 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains code to setup the command invocations in [`super::command`] which would -//! otherwise bloat that module. +//! Setup code for [`super::command`] which would otherwise bloat that module. +//! +//! Should only be used for benchmarking as it may break in other contexts. use crate::service::FullClient; use node_template_runtime as runtime; -use runtime::SystemCall; +use runtime::{AccountId, Balance, BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; use sp_core::{Encode, Pair}; @@ -35,19 +36,27 @@ use std::{sync::Arc, time::Duration}; /// Generates extrinsics for the `benchmark overhead` command. /// /// Note: Should only be used for benchmarking. -pub struct BenchmarkExtrinsicBuilder { +pub struct RemarkBuilder { client: Arc, } -impl BenchmarkExtrinsicBuilder { +impl RemarkBuilder { /// Creates a new [`Self`] from the given client. pub fn new(client: Arc) -> Self { Self { client } } } -impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { - fn remark(&self, nonce: u32) -> std::result::Result { +impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { + fn pallet(&self) -> &str { + "system" + } + + fn extrinsic(&self) -> &str { + "remark" + } + + fn build(&self, nonce: u32) -> std::result::Result { let acc = Sr25519Keyring::Bob.pair(); let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic( self.client.as_ref(), @@ -61,6 +70,49 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { } } +/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks. +/// +/// Note: Should only be used for benchmarking. +pub struct TransferKeepAliveBuilder { + client: Arc, + dest: AccountId, + value: Balance, +} + +impl TransferKeepAliveBuilder { + /// Creates a new [`Self`] from the given client. 
+ pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { + Self { client, dest, value } + } +} + +impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { + fn pallet(&self) -> &str { + "balances" + } + + fn extrinsic(&self) -> &str { + "transfer_keep_alive" + } + + fn build(&self, nonce: u32) -> std::result::Result { + let acc = Sr25519Keyring::Bob.pair(); + let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic( + self.client.as_ref(), + acc, + BalancesCall::transfer_keep_alive { + dest: self.dest.clone().into(), + value: self.value.into(), + } + .into(), + nonce, + ) + .into(); + + Ok(extrinsic) + } +} + /// Create a transaction using the given `call`. /// /// Note: Should only be used for benchmarking. diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e3e10007929e6..142f0b40c325e 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -1,14 +1,14 @@ use crate::{ + benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}, chain_spec, cli::{Cli, Subcommand}, - command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder}, service, }; -use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; -use node_template_runtime::Block; +use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; +use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; -use std::sync::Arc; +use sp_keyring::Sr25519Keyring; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -137,9 +137,23 @@ pub fn run() -> sc_cli::Result<()> { }, BenchmarkCmd::Overhead(cmd) => { let PartialComponents { client, .. } = service::new_partial(&config)?; - let ext_builder = BenchmarkExtrinsicBuilder::new(client.clone()); + let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder)) + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + }, + BenchmarkCmd::Extrinsic(cmd) => { + let PartialComponents { client, .. } = service::new_partial(&config)?; + // Register the *Remark* and *TKA* builders. + let ext_factory = ExtrinsicFactory(vec![ + Box::new(RemarkBuilder::new(client.clone())), + Box::new(TransferKeepAliveBuilder::new( + client.clone(), + Sr25519Keyring::Alice.to_account_id(), + EXISTENTIAL_DEPOSIT, + )), + ]); + + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 0f2fbd5a909c6..426cbabb6fbf7 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -4,9 +4,9 @@ mod chain_spec; #[macro_use] mod service; +mod benchmarking; mod cli; mod command; -mod command_helper; mod rpc; fn main() -> sc_cli::Result<()> { diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index f45f914d94f44..ffb2440caa0ed 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -296,24 +296,24 @@ pub fn new_full(mut config: Configuration) -> Result .spawn_blocking("aura", Some("block-authoring"), aura); } - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. 
- let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - local_role: role, - telemetry: telemetry.as_ref().map(|x| x.handle()), - protocol_name: grandpa_protocol_name, - }; - if enable_grandpa { + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + // start the full GRANDPA voter // NOTE: non-authorities could run the GRANDPA observer protocol, but at // this point the full voter should provide better guarantees of block diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 067c7ce2575a0..a9209a9040b6d 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -2,7 +2,7 @@ /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// +/// pub use pallet::*; #[cfg(test)] @@ -19,6 +19,10 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet<T>(_); + /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { @@ -26,20 +30,16 @@ pub mod pallet { type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; } - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet<T>(_); - // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + // https://docs.substrate.io/main-docs/build/runtime-storage/ #[pallet::storage] #[pallet::getter(fn something)] // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items + // https://docs.substrate.io/main-docs/build/runtime-storage/#declaring-storage-items pub type Something<T> = StorageValue<_, u32>; // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/v3/runtime/events-and-errors + // https://docs.substrate.io/main-docs/build/events-errors/ #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { @@ -64,11 +64,11 @@ pub mod pallet { impl<T: Config> Pallet<T> { /// An example dispatchable that takes a single value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. - #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] + #[pallet::weight(10_000 + T::DbWeight::get().writes(1).ref_time())] pub fn do_something(origin: OriginFor<T>, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. 
// This function will return an error if the extrinsic is not signed. - // https://docs.substrate.io/v3/runtime/origins + // https://docs.substrate.io/main-docs/build/origins/ let who = ensure_signed(origin)?; // Update storage. @@ -81,7 +81,7 @@ pub mod pallet { } /// An example dispatchable that may throw a custom error. - #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] + #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1).ref_time())] pub fn cause_error(origin: OriginFor<T>) -> DispatchResult { let _who = ensure_signed(origin)?; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 8721fe6c78851..e03f37b2eea69 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -17,8 +17,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, - TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>}, + System: frame_system, + TemplateModule: pallet_template, } ); diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c514cdf6c25fd..b43fbde52dcdc 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -89,8 +89,8 @@ pub mod opaque { } } -// To learn more about runtime versioning and what each of the following value means: -// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning +// To learn more about runtime versioning, see: +// https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), @@ -234,6 +234,9 @@ impl pallet_timestamp::Config for Runtime { type WeightInfo = (); } +/// Existential deposit. +pub const EXISTENTIAL_DEPOSIT: u128 = 500; + impl pallet_balances::Config for Runtime { type MaxLocks = ConstU32<50>; type MaxReserves = (); @@ -243,7 +246,7 @@ impl pallet_balances::Config for Runtime { /// The ubiquitous event type. type Event = Event; type DustRemoval = (); - type ExistentialDeposit = ConstU128<500>; + type ExistentialDeposit = ConstU128<EXISTENTIAL_DEPOSIT>; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>; } @@ -269,10 +272,11 @@ impl pallet_template::Config for Runtime { // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( - pub enum Runtime where + pub struct Runtime + where Block = Block, NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic + UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system, RandomnessCollectiveFlip: pallet_randomness_collective_flip, @@ -463,6 +467,23 @@ impl_runtime_apis! 
{ } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: Call, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: Call, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 90c325b8e5b32..c7a747c30ed2e 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -13,12 +13,12 @@ clap = { version = "3.1.18", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "3.0.0-dev", path = "../testing" } -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.11.0" kvdb-rocksdb = "0.15.1" diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 2b26ed9089a51..863928c719429 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{trie_types::TrieDBMutV1, TrieMut}; +use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut}; use crate::simple_trie::SimpleTrie; @@ -43,7 +43,8 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = TrieDBMutV1::::new(&mut trie, &mut root); + let mut trie_db = + TrieDBMutBuilderV1::::new(&mut trie, &mut root).build(); for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index b9229fbd5331d..47f630eb68700 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -148,13 +148,13 @@ impl core::Benchmark for ImportBenchmark { // the transaction fee into the treasury // - extrinsic success assert_eq!( - node_runtime::System::events().len(), + kitchensink_runtime::System::events().len(), (self.block.extrinsics.len() - 1) * 8 + 1, ); }, BlockType::Noop => { assert_eq!( - node_runtime::System::events().len(), + kitchensink_runtime::System::events().len(), // should be 2 per signed extrinsic + 1 per unsigned // we have 1 unsigned and the rest are signed in the block // those 2 events per signed are: diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index c59389570e534..aa9c96a1cbd3f 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -33,7 +33,7 @@ pub struct SimpleTrie<'a> { impl<'a> AsHashDB for SimpleTrie<'a> { fn as_hash_db(&self) -> &dyn hash_db::HashDB { - &*self + self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index d508dc712e1c3..de49a6fe7b6da 100644 
--- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,7 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMutV1, TrieMut as _}; +use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _}; use std::{borrow::Cow, collections::HashMap, sync::Arc}; use node_primitives::Hash; @@ -180,7 +180,7 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_backend .storage(&warmup_key[..]) @@ -286,8 +286,7 @@ impl core::Benchmark for TrieWriteBenchmark { let mut overlay = HashMap::new(); let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; - let mut trie_db_mut = TrieDBMutV1::from_existing(&mut trie, &mut new_root) - .expect("Failed to create TrieDBMut"); + let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_db_mut diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index b2e24f0b33189..d84b8491cb162 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -37,7 +37,7 @@ crate-type = ["cdylib", "rlib"] clap = { version = "3.1.18", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } futures = "0.3.21" hex-literal = "0.3.4" log = "0.4.17" @@ -66,6 +66,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } +sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } @@ -87,7 +88,7 @@ pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transa pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } node-rpc = { version = "3.0.0-dev", path = "../rpc" } node-primitives = { version = "2.0.0", path = "../primitives" } node-executor = { version = "3.0.0-dev", path = "../executor" } @@ -97,7 +98,7 @@ sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli" frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } -serde_json = "1.0.79" +serde_json = 
"1.0.85" [target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies] node-executor = { version = "3.0.0-dev", path = "../executor", features = ["wasmtime"] } @@ -159,10 +160,10 @@ cli = [ "substrate-build-script-utils", "try-runtime-cli", ] -runtime-benchmarks = ["node-runtime/runtime-benchmarks", "frame-benchmarking-cli"] +runtime-benchmarks = ["kitchensink-runtime/runtime-benchmarks", "frame-benchmarking-cli"] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. -try-runtime = ["node-runtime/try-runtime", "try-runtime-cli"] +try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli"] [[bench]] name = "transaction_pool" diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index ad16ba8e4072b..c0f3b96e093cb 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -18,8 +18,8 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use kitchensink_runtime::{constants::currency::*, BalancesCall}; use node_cli::service::{create_extrinsic, FullClient}; -use node_runtime::{constants::currency::*, BalancesCall}; use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_consensus::{ @@ -28,7 +28,7 @@ use sc_consensus::{ }; use sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, @@ -72,10 +72,9 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 67108864, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, @@ -121,9 +120,9 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { } fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { - node_runtime::UncheckedExtrinsic { + kitchensink_runtime::UncheckedExtrinsic { signature: None, - function: node_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), + function: kitchensink_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), } .into() } diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index f1fce16d8c1b3..e6084fba8242a 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -16,15 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::time::Duration; + use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use futures::{future, StreamExt}; +use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall}; use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; use node_primitives::AccountId; -use node_runtime::{constants::currency::*, BalancesCall, SudoCall}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, TransactionPoolOptions, WasmExecutionMethod, }, BasePath, Configuration, Role, @@ -58,15 +60,15 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { ready: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, future: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, reject_future_transactions: false, + ban_time: Duration::from_secs(30 * 60), }, network: network_config, keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 67108864, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable @@ -240,25 +242,25 @@ fn transaction_pool_benchmarks(c: &mut Criterion) { move |b| { b.iter_batched( || { - let prepare_extrinsics = create_account_extrinsics(&*node.client, &accounts); + let prepare_extrinsics = create_account_extrinsics(&node.client, &accounts); runtime.block_on(future::join_all(prepare_extrinsics.into_iter().map(|tx| { submit_tx_and_wait_for_inclusion( &node.transaction_pool, tx, - &*node.client, + &node.client, true, ) }))); - create_benchmark_extrinsics(&*node.client, &accounts, extrinsics_per_account) + create_benchmark_extrinsics(&node.client, &accounts, extrinsics_per_account) }, |extrinsics| { runtime.block_on(future::join_all(extrinsics.into_iter().map(|tx| { submit_tx_and_wait_for_inclusion( &node.transaction_pool, tx, - &*node.client, + &node.client, false, ) }))); diff --git a/bin/node/cli/src/command_helper.rs b/bin/node/cli/src/benchmarking.rs similarity index 52% rename from bin/node/cli/src/command_helper.rs rename to bin/node/cli/src/benchmarking.rs index 84d85ee367cab..19bd1660a4dd9 100644 --- a/bin/node/cli/src/command_helper.rs +++ b/bin/node/cli/src/benchmarking.rs @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains code to setup the command invocations in [`super::command`] which would -//! otherwise bloat that module. +//! Setup code for [`super::command`] which would otherwise bloat that module. +//! +//! Should only be used for benchmarking as it may break in other contexts. 
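+//!
+//! Provides the [`RemarkBuilder`] and [`TransferKeepAliveBuilder`] extrinsic
+//! builders defined below, which back the `benchmark overhead` and
+//! `benchmark extrinsic` subcommands in [`super::command`].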
use crate::service::{create_extrinsic, FullClient}; -use node_runtime::SystemCall; +use kitchensink_runtime::{BalancesCall, SystemCall}; +use node_primitives::{AccountId, Balance}; use sc_cli::Result; use sp_inherents::{InherentData, InherentDataProvider}; use sp_keyring::Sr25519Keyring; @@ -29,20 +31,30 @@ use sp_runtime::OpaqueExtrinsic; use std::{sync::Arc, time::Duration}; -/// Generates extrinsics for the `benchmark overhead` command. -pub struct BenchmarkExtrinsicBuilder { +/// Generates `System::Remark` extrinsics for the benchmarks. +/// +/// Note: Should only be used for benchmarking. +pub struct RemarkBuilder { client: Arc, } -impl BenchmarkExtrinsicBuilder { +impl RemarkBuilder { /// Creates a new [`Self`] from the given client. pub fn new(client: Arc) -> Self { Self { client } } } -impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { - fn remark(&self, nonce: u32) -> std::result::Result { +impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { + fn pallet(&self) -> &str { + "system" + } + + fn extrinsic(&self) -> &str { + "remark" + } + + fn build(&self, nonce: u32) -> std::result::Result { let acc = Sr25519Keyring::Bob.pair(); let extrinsic: OpaqueExtrinsic = create_extrinsic( self.client.as_ref(), @@ -56,6 +68,48 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { } } +/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks. +/// +/// Note: Should only be used for benchmarking. +pub struct TransferKeepAliveBuilder { + client: Arc, + dest: AccountId, + value: Balance, +} + +impl TransferKeepAliveBuilder { + /// Creates a new [`Self`] from the given client. + pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { + Self { client, dest, value } + } +} + +impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { + fn pallet(&self) -> &str { + "balances" + } + + fn extrinsic(&self) -> &str { + "transfer_keep_alive" + } + + fn build(&self, nonce: u32) -> std::result::Result { + let acc = Sr25519Keyring::Bob.pair(); + let extrinsic: OpaqueExtrinsic = create_extrinsic( + self.client.as_ref(), + acc, + BalancesCall::transfer_keep_alive { + dest: self.dest.clone().into(), + value: self.value.into(), + }, + Some(nonce), + ) + .into(); + + Ok(extrinsic) + } +} + /// Generates inherent data for the `benchmark overhead` command. 
pub fn inherent_benchmark_data() -> Result { let mut inherent_data = InherentData::new(); diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 47c3634aa00df..77e2f73dd6e18 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -20,7 +20,7 @@ use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, MaxNominations, NominationPoolsConfig, SessionConfig, @@ -40,8 +40,8 @@ use sp_runtime::{ Perbill, }; +pub use kitchensink_runtime::GenesisConfig; pub use node_primitives::{AccountId, Balance, Signature}; -pub use node_runtime::GenesisConfig; type AccountPublic = ::Signer; @@ -343,7 +343,7 @@ pub fn testnet_genesis( sudo: SudoConfig { key: Some(root_key) }, babe: BabeConfig { authorities: vec![], - epoch_config: Some(node_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), }, im_online: ImOnlineConfig { keys: vec![] }, authority_discovery: AuthorityDiscoveryConfig { keys: vec![] }, diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b17a26fa02935..85e5415dbe139 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -16,18 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder}; +use super::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}; use crate::{ chain_spec, service, service::{new_partial, FullClient}, Cli, Subcommand, }; use frame_benchmarking_cli::*; +use kitchensink_runtime::{ExistentialDeposit, RuntimeApi}; use node_executor::ExecutorDispatch; use node_primitives::Block; -use node_runtime::RuntimeApi; use sc_cli::{ChainSpec, Result, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; +use sp_keyring::Sr25519Keyring; use std::sync::Arc; @@ -74,7 +75,7 @@ impl SubstrateCli for Cli { } fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &node_runtime::VERSION + &kitchensink_runtime::VERSION } } @@ -126,9 +127,23 @@ pub fn run() -> Result<()> { }, BenchmarkCmd::Overhead(cmd) => { let PartialComponents { client, .. } = new_partial(&config)?; - let ext_builder = BenchmarkExtrinsicBuilder::new(client.clone()); + let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder)) + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + }, + BenchmarkCmd::Extrinsic(cmd) => { + let PartialComponents { client, .. } = service::new_partial(&config)?; + // Register the *Remark* and *TKA* builders. 
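+ // Each builder identifies itself through its `pallet()`/`extrinsic()` strings
+ // ("system"/"remark" and "balances"/"transfer_keep_alive" here), which is
+ // presumably how the factory resolves the pair requested on the CLI, e.g.:
+ //
+ //     substrate benchmark extrinsic --dev --pallet system --extrinsic remark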
+ let ext_factory = ExtrinsicFactory(vec![ + Box::new(RemarkBuilder::new(client.clone())), + Box::new(TransferKeepAliveBuilder::new( + client.clone(), + Sr25519Keyring::Alice.to_account_id(), + ExistentialDeposit::get(), + )), + ]); + + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index 06c0bcccbc296..13c074268e50f 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -35,11 +35,11 @@ pub mod chain_spec; #[macro_use] pub mod service; #[cfg(feature = "cli")] +mod benchmarking; +#[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; -#[cfg(feature = "cli")] -mod command_helper; #[cfg(feature = "cli")] pub use cli::*; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index bff4be88002fb..b20a3ac59a96a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -23,13 +23,14 @@ use codec::Encode; use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; +use kitchensink_runtime::RuntimeApi; use node_executor::ExecutorDispatch; use node_primitives::Block; -use node_runtime::RuntimeApi; use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::{Event, NetworkService}; +use sc_network::NetworkService; +use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; @@ -68,41 +69,43 @@ pub fn fetch_nonce(client: &FullClient, account: sp_core::sr25519::Pair) -> u32 pub fn create_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - function: impl Into, + function: impl Into, nonce: Option, -) -> node_runtime::UncheckedExtrinsic { +) -> kitchensink_runtime::UncheckedExtrinsic { let function = function.into(); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); let best_hash = client.chain_info().best_hash; let best_block = client.chain_info().best_number; let nonce = nonce.unwrap_or_else(|| fetch_nonce(client, sender.clone())); - let period = node_runtime::BlockHashCount::get() + let period = kitchensink_runtime::BlockHashCount::get() .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: node_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( + let extra: kitchensink_runtime::SignedExtra = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( period, best_block.saturated_into(), )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), ); - let raw_payload = node_runtime::SignedPayload::from_raw( + let raw_payload = kitchensink_runtime::SignedPayload::from_raw( 
function.clone(), extra.clone(), ( (), - node_runtime::VERSION.spec_version, - node_runtime::VERSION.transaction_version, + kitchensink_runtime::VERSION.spec_version, + kitchensink_runtime::VERSION.transaction_version, genesis_hash, best_hash, (), @@ -112,10 +115,10 @@ pub fn create_extrinsic( ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); - node_runtime::UncheckedExtrinsic::new_signed( + kitchensink_runtime::UncheckedExtrinsic::new_signed( function, sp_runtime::AccountId32::from(sender.public()).into(), - node_runtime::Signature::Sr25519(signature), + kitchensink_runtime::Signature::Sr25519(signature), extra, ) } @@ -565,11 +568,11 @@ pub fn new_full( mod tests { use crate::service::{new_full_base, NewFullBase}; use codec::Encode; - use node_primitives::{Block, DigestItem, Signature}; - use node_runtime::{ + use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, Address, BalancesCall, Call, UncheckedExtrinsic, }; + use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; diff --git a/bin/node/cli/tests/benchmark_extrinsic_works.rs b/bin/node/cli/tests/benchmark_extrinsic_works.rs new file mode 100644 index 0000000000000..69800ad3c6c13 --- /dev/null +++ b/bin/node/cli/tests/benchmark_extrinsic_works.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use assert_cmd::cargo::cargo_bin; +use std::process::Command; +use tempfile::tempdir; + +/// Tests that the `benchmark extrinsic` command works for +/// remark and transfer_keep_alive within the substrate dev runtime. +#[test] +fn benchmark_extrinsic_works() { + benchmark_extrinsic("system", "remark"); + benchmark_extrinsic("balances", "transfer_keep_alive"); +} + +/// Checks that the `benchmark extrinsic` command works for the given pallet and extrinsic. +fn benchmark_extrinsic(pallet: &str, extrinsic: &str) { + let base_dir = tempdir().expect("could not create a temp dir"); + + let status = Command::new(cargo_bin("substrate")) + .args(&["benchmark", "extrinsic", "--dev"]) + .arg("-d") + .arg(base_dir.path()) + .args(&["--pallet", pallet, "--extrinsic", extrinsic]) + // Run with low repeats for faster execution. + .args(["--warmup=10", "--repeat=10", "--max-ext-per-block=10"]) + .status() + .unwrap(); + + assert!(status.success()); +} diff --git a/bin/node/cli/tests/benchmark_pallet_works.rs b/bin/node/cli/tests/benchmark_pallet_works.rs new file mode 100644 index 0000000000000..bf29c0e308bcb --- /dev/null +++ b/bin/node/cli/tests/benchmark_pallet_works.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use assert_cmd::cargo::cargo_bin;
+use std::process::Command;
+
+pub mod common;
+
+/// Tests that `benchmark pallet` works for different combinations of `steps` and `repeat`.
+#[test]
+fn benchmark_pallet_works() {
+	// Some invalid combinations:
+	benchmark_pallet(0, 10, false);
+	benchmark_pallet(1, 10, false);
+	// ... and some valid:
+	benchmark_pallet(2, 1, true);
+	benchmark_pallet(50, 20, true);
+	benchmark_pallet(20, 50, true);
+}
+
+fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) {
+	let output = Command::new(cargo_bin("substrate"))
+		.args(["benchmark", "pallet", "--dev"])
+		// Use the `addition` benchmark since it is the fastest.
+		.args(["--pallet", "frame-benchmarking", "--extrinsic", "addition"])
+		.args(["--steps", &format!("{}", steps), "--repeat", &format!("{}", repeat)])
+		.output()
+		.unwrap();
+
+	if output.status.success() != should_work {
+		let log = String::from_utf8_lossy(&output.stderr).to_string();
+		panic!("Test failed:\n{}", log);
+	}
+}
diff --git a/bin/node/cli/tests/benchmark_storage_works.rs b/bin/node/cli/tests/benchmark_storage_works.rs
index 30f860e48459f..82d1c943ae7aa 100644
--- a/bin/node/cli/tests/benchmark_storage_works.rs
+++ b/bin/node/cli/tests/benchmark_storage_works.rs
@@ -47,6 +47,7 @@ fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus {
 		.args(["--state-version", "1"])
 		.args(["--warmups", "0"])
 		.args(["--add", "100", "--mul", "1.2", "--metric", "p75"])
+		.arg("--include-child-trees")
 		.status()
 		.unwrap()
 }
diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml
index 5fbf59a74fdcd..71865783da8ac 100644
--- a/bin/node/executor/Cargo.toml
+++ b/bin/node/executor/Cargo.toml
@@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" }
 scale-info = { version = "2.1.1", features = ["derive"] }
 frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" }
 node-primitives = { version = "2.0.0", path = "../primitives" }
-node-runtime = { version = "3.0.0-dev", path = "../runtime" }
+kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" }
 sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" }
 sp-core = { version = "6.0.0", path = "../../../primitives/core" }
 sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" }
@@ -34,6 +34,7 @@ node-testing = { version = "3.0.0-dev", path = "../testing" }
 pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" }
 pallet-contracts = { version = "4.0.0-dev", path = "../../../frame/contracts" }
 pallet-im-online = { version = "4.0.0-dev", path = "../../../frame/im-online" }
+pallet-sudo = { version = "4.0.0-dev", path = "../../../frame/sudo" }
 pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" }
 pallet-treasury = { version = "4.0.0-dev", path =
"../../../frame/treasury" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 61e2d1b053012..a1d31a5a966db 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -18,12 +18,12 @@ use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; -use node_executor::ExecutorDispatch; -use node_primitives::{BlockNumber, Hash}; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, }; +use node_executor::ExecutorDispatch; +use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; #[cfg(feature = "wasmtime")] use sc_executor::WasmtimeInstantiationStrategy; @@ -41,7 +41,7 @@ criterion_main!(benches); /// The wasm runtime code. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect( + kitchensink_runtime::WASM_BINARY.expect( "Development wasm binary is not available. Testing is only supported with the flag \ disabled.", ) @@ -49,9 +49,9 @@ pub fn compact_code_unwrap() -> &'static [u8] { const GENESIS_HASH: [u8; 32] = [69u8; 32]; -const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version; +const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version; -const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version; +const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version; const HEAP_PAGES: u64 = 20; diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index 9f87c7d12623c..c4edf5ad22f47 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -28,10 +28,10 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { - node_runtime::api::dispatch(method, data) + kitchensink_runtime::api::dispatch(method, data) } fn native_version() -> sc_executor::NativeVersion { - node_runtime::native_version() + kitchensink_runtime::native_version() } } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 31a9bd0a90496..3426df5872c35 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ traits::Currency, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; @@ -26,12 +26,12 @@ use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, }; -use node_primitives::{Balance, Hash}; -use node_runtime::{ +use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, UncheckedExtrinsic, }; +use node_primitives::{Balance, Hash}; use node_testing::keyring::*; use wat; @@ -44,7 +44,7 @@ use self::common::{sign, *}; /// have to execute provided wasm code instead of the native equivalent. This trick is used to /// test code paths that differ between native and wasm versions. 
pub fn bloaty_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY_BLOATY.expect( + kitchensink_runtime::WASM_BINARY_BLOATY.expect( "Development wasm binary is not available. \ Testing is only supported with the flag disabled.", ) @@ -127,7 +127,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { let block2 = construct_block( &mut t, 2, - block1.1.clone(), + block1.1, vec![ CheckedExtrinsic { signed: None, @@ -733,7 +733,7 @@ fn deploying_wasm_contract_should_work() { function: Call::Contracts( pallet_contracts::Call::instantiate_with_code:: { value: 0, - gas_limit: 500_000_000, + gas_limit: Weight::from_ref_time(500_000_000), storage_deposit_limit: None, code: transfer_code, data: Vec::new(), @@ -746,7 +746,7 @@ fn deploying_wasm_contract_should_work() { function: Call::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, - gas_limit: 500_000_000, + gas_limit: Weight::from_ref_time(500_000_000), storage_deposit_limit: None, data: vec![0x00, 0x01, 0x02, 0x03], }), diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index a2bb91056f474..407a1e09f8efb 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -35,12 +35,12 @@ use sp_runtime::{ }; use sp_state_machine::TestExternalities as CoreTestExternalities; -use node_executor::ExecutorDispatch; -use node_primitives::{BlockNumber, Hash}; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, UncheckedExtrinsic, }; +use node_executor::ExecutorDispatch; +use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sp_externalities::Externalities; @@ -69,7 +69,7 @@ impl AppCrypto for TestAuthorityId { /// making the binary slimmer. There is a convention to use compact version of the runtime /// as canonical. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect( + kitchensink_runtime::WASM_BINARY.expect( "Development wasm binary is not available. 
Testing is only supported with the flag \ disabled.", ) @@ -77,9 +77,9 @@ pub fn compact_code_unwrap() -> &'static [u8] { pub const GENESIS_HASH: [u8; 32] = [69u8; 32]; -pub const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version; +pub const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version; -pub const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version; +pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version; pub type TestExternalities = CoreTestExternalities; diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index cf794bae1d307..dd1318254d9b7 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -20,11 +20,11 @@ use frame_support::{ traits::Currency, weights::{constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFee}, }; -use node_primitives::Balance; -use node_runtime::{ +use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee, TransactionPayment, }; +use node_primitives::Balance; use node_testing::keyring::*; use sp_core::NeverNativeValue; use sp_runtime::{traits::One, Perbill}; @@ -58,8 +58,10 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block { - ratio: Perbill::from_percent(60), + function: Call::Sudo(pallet_sudo::Call::sudo { + call: Box::new(Call::System(frame_system::Call::fill_block { + ratio: Perbill::from_percent(60), + })), }), }, ], @@ -71,7 +73,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { let block2 = construct_block( &mut tt, 2, - block1.1.clone(), + block1.1, vec![ CheckedExtrinsic { signed: None, @@ -206,7 +208,7 @@ fn transaction_fee_is_correct() { // we know that weight to fee multiplier is effect-less in block 1. 
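// (`weight` is now the dedicated `Weight` type rather than a bare integer; the
// scalar used for the fee arithmetic below is read back via `weight.ref_time()`.)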
// current weight of transfer = 200_000_000 // Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance) - assert_eq!(weight_fee, weight as Balance); + assert_eq!(weight_fee, weight.ref_time() as Balance); balance_alice -= base_fee; balance_alice -= weight_fee; balance_alice -= tip; diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 7df13a577006e..be43f3c78674f 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -17,7 +17,7 @@ use codec::Decode; use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; -use node_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; +use kitchensink_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; use sp_application_crypto::AppKey; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 547d0df7a8372..07b25085b9d10 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ca971f29e93c9..10b15b6ec554d 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-runtime" +name = "kitchensink-runtime" version = "3.0.0-dev" authors = ["Parity Technologies "] edition = "2021" @@ -79,6 +79,7 @@ pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../.. 
pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } +pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../../frame/offences" } pallet-offences-benchmarking = { version = "4.0.0-dev", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } pallet-preimage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/preimage" } @@ -145,6 +146,7 @@ std = [ "pallet-mmr/std", "pallet-multisig/std", "pallet-nomination-pools/std", + "pallet-nomination-pools-runtime-api/std", "pallet-identity/std", "pallet-scheduler/std", "node-primitives/std", diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 68c780094208f..510955a5b7b3e 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -163,13 +163,13 @@ mod multiplier_tests { let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max_normal() as f64; + let m = max_normal().ref_time() as f64; // block weight always truncated to max weight - let block_weight = (block_weight as f64).min(m); + let block_weight = (block_weight.ref_time() as f64).min(m); let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight - let ss = target() as f64; + let ss = target().ref_time() as f64; // Current saturation in terms of weight let s = block_weight; @@ -197,12 +197,12 @@ mod multiplier_tests { fn truth_value_update_poc_works() { let fm = Multiplier::saturating_from_rational(1, 2); let test_set = vec![ - (0, fm.clone()), - (100, fm.clone()), - (1000, fm.clone()), - (target(), fm.clone()), - (max_normal() / 2, fm.clone()), - (max_normal(), fm.clone()), + (Weight::zero(), fm), + (Weight::from_ref_time(100), fm), + (Weight::from_ref_time(1000), fm), + (target(), fm), + (max_normal() / 2, fm), + (max_normal(), fm), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { @@ -229,7 +229,7 @@ mod multiplier_tests { #[test] fn multiplier_cannot_go_below_limit() { // will not go any further below even if block is empty. - run_with_system_weight(0, || { + run_with_system_weight(Weight::new(), || { let next = runtime_multiplier_update(min_multiplier()); assert_eq!(next, min_multiplier()); }) @@ -247,7 +247,7 @@ mod multiplier_tests { // 1 < 0.00001 * k * 0.1875 // 10^9 / 1875 < k // k > 533_333 ~ 18,5 days. - run_with_system_weight(0, || { + run_with_system_weight(Weight::new(), || { // start from 1, the default. let mut fm = Multiplier::one(); let mut iterations: u64 = 0; @@ -283,7 +283,8 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - + Weight::from_ref_time(100); // Default substrate weight. 
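// (i.e. `ExtrinsicBaseWeight`, the fixed base cost charged per extrinsic,
// fetched just below.)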
let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -407,27 +408,27 @@ mod multiplier_tests { #[test] fn weight_to_fee_should_not_overflow_on_large_weights() { - let kb = 1024 as Weight; + let kb = Weight::from_ref_time(1024); let mb = kb * kb; let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ - 0, - 1, - 10, - 1000, + Weight::zero(), + Weight::one(), + Weight::from_ref_time(10), + Weight::from_ref_time(1000), kb, 10 * kb, 100 * kb, mb, 10 * mb, - 2147483647, - 4294967295, + Weight::from_ref_time(2147483647), + Weight::from_ref_time(4294967295), BlockWeights::get().max_block / 2, BlockWeights::get().max_block, - Weight::max_value() / 2, - Weight::max_value(), + Weight::MAX / 2, + Weight::MAX, ] .into_iter() .for_each(|i| { @@ -440,7 +441,7 @@ mod multiplier_tests { // Some values that are all above the target and will cause an increase. let t = target(); - vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { + vec![t + Weight::from_ref_time(100), t * 2, t * 4].into_iter().for_each(|i| { run_with_system_weight(i, || { let fm = runtime_multiplier_update(max_fm); // won't grow. The convert saturates everything. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1a60c9d4752d8..c268bdf112229 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -68,7 +68,7 @@ use sp_runtime::{ SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, Perbill, Percent, Permill, Perquintill, + ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, }; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] @@ -170,7 +170,7 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.scalar_saturating_mul(2); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; @@ -452,6 +452,7 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_asset_tx_payment::Config for Runtime { + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -726,7 +727,7 @@ impl pallet_bags_list::Config for Runtime { parameter_types! { pub const PostUnbondPoolsWindow: u32 = 4; pub const NominationPoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub const MinPointsToBalance: u32 = 10; + pub const MaxPointsToBalance: u8 = 10; } use sp_runtime::traits::Convert; @@ -747,6 +748,8 @@ impl pallet_nomination_pools::Config for Runtime { type WeightInfo = (); type Event = Event; type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = pallet_staking::Pallet; @@ -754,7 +757,7 @@ impl pallet_nomination_pools::Config for Runtime { type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; type PalletId = NominationPoolsPalletId; - type MinPointsToBalance = MinPointsToBalance; + type MaxPointsToBalance = MaxPointsToBalance; } parameter_types! { @@ -961,6 +964,8 @@ parameter_types! 
{ pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; + pub const MaxVoters: u32 = 10 * 1000; + pub const MaxCandidates: u32 = 1000; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -984,6 +989,8 @@ impl pallet_elections_phragmen::Config for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; + type MaxVoters = MaxVoters; + type MaxCandidates = MaxCandidates; type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight; } @@ -1509,8 +1516,10 @@ impl pallet_state_trie_migration::Config for Runtime { type WeightInfo = (); } +const ALLIANCE_MOTION_DURATION_IN_BLOCKS: BlockNumber = 5 * DAYS; + parameter_types! { - pub const AllianceMotionDuration: BlockNumber = 5 * DAYS; + pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS; pub const AllianceMaxProposals: u32 = 100; pub const AllianceMaxMembers: u32 = 100; } @@ -1532,6 +1541,7 @@ parameter_types! { pub const MaxFellows: u32 = AllianceMaxMembers::get() - MaxFounders::get(); pub const MaxAllies: u32 = 100; pub const AllyDeposit: Balance = 10 * DOLLARS; + pub const RetirementPeriod: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS + (1 * DAYS); } impl pallet_alliance::Config for Runtime { @@ -1568,6 +1578,7 @@ impl pallet_alliance::Config for Runtime { type MaxMembersCount = AllianceMaxMembers; type AllyDeposit = AllyDeposit; type WeightInfo = pallet_alliance::weights::SubstrateWeight; + type RetirementPeriod = RetirementPeriod; } construct_runtime!( @@ -1673,8 +1684,16 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, + Migrations, >; +// All migrations executed on runtime upgrade as a nested tuple of types implementing +// `OnRuntimeUpgrade`. +type Migrations = ( + pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_alliance::migration::Migration, +); + /// MMR helper types. mod mmr { use super::Runtime; @@ -1693,6 +1712,7 @@ extern crate frame_benchmarking; mod benches { define_benchmarks!( [frame_benchmarking, BaselineBench::] + [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] [pallet_bags_list, BagsList] @@ -1830,6 +1850,12 @@ impl_runtime_apis! { } } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi for Runtime { + fn pending_rewards(member_account: AccountId) -> Balance { + NominationPools::pending_rewards(member_account).unwrap_or_default() + } + } + impl sp_consensus_babe::BabeApi for Runtime { fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { // The choice of `c` parameter (where `1 - c` represents the @@ -1908,7 +1934,7 @@ impl_runtime_apis! { storage_deposit_limit: Option, input_data: Vec, ) -> pallet_contracts_primitives::ContractExecResult { - Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true) + Contracts::bare_call(origin, dest, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, input_data, true) } fn instantiate( @@ -1921,7 +1947,7 @@ impl_runtime_apis! { salt: Vec, ) -> pallet_contracts_primitives::ContractInstantiateResult { - Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true) + Contracts::bare_instantiate(origin, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, code, data, salt, true) } fn upload_code( @@ -1953,6 +1979,17 @@ impl_runtime_apis! 
{ } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + impl pallet_mmr::primitives::MmrApi< Block, mmr::Hash, diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 7caf10366b48c..ed81301e45189 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -21,7 +21,7 @@ tempfile = "3.1.0" frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } node-executor = { version = "3.0.0-dev", path = "../executor" } node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 00ce7f64bc3f0..534f0a4f09732 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -34,11 +34,11 @@ use crate::{ }; use codec::{Decode, Encode}; use futures::executor; -use node_primitives::Block; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, Signature, SystemCall, UncheckedExtrinsic, }; +use node_primitives::Block; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ execution_extensions::{ExecutionExtensions, ExecutionStrategies}, @@ -304,20 +304,21 @@ impl<'a> Iterator for BlockContentIterator<'a> { CheckedExtrinsic { signed: Some(( sender, - signed_extra(0, node_runtime::ExistentialDeposit::get() + 1), + signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), )), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => Call::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), - value: node_runtime::ExistentialDeposit::get() + 1, + value: kitchensink_runtime::ExistentialDeposit::get() + 1, }), BlockType::RandomTransfersReaping => { Call::Balances(BalancesCall::transfer { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
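// That is, assuming the sender was endowed with `100 * DOLLARS`, transferring
// `100 * DOLLARS - (ED - 1)` leaves exactly `ED - 1` behind, which is below the
// existential deposit and reaps the account.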
- value: 100 * DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), + value: 100 * DOLLARS - + (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, BlockType::Noop => Call::System(SystemCall::remark { remark: Vec::new() }), @@ -387,11 +388,10 @@ impl BenchDb { keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: 16 * 1024 * 1024, - state_cache_child_ratio: Some((0, 100)), + trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - keep_blocks: sc_client_db::KeepBlocks::All, + blocks_pruning: sc_client_db::BlocksPruning::All, }; let task_executor = TaskExecutor::new(); @@ -592,9 +592,9 @@ impl BenchKeyring { } /// Generate genesis with accounts from this keyring endowed with some balance. - pub fn generate_genesis(&self) -> node_runtime::GenesisConfig { + pub fn generate_genesis(&self) -> kitchensink_runtime::GenesisConfig { crate::genesis::config_endowed( - Some(node_runtime::wasm_binary_unwrap()), + Some(kitchensink_runtime::wasm_binary_unwrap()), self.collect_account_ids(), ) } diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index 8cb98511098f1..590304bdd52a5 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -16,14 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Utilities to build a `TestClient` for `node-runtime`. +//! Utilities to build a `TestClient` for `kitchensink-runtime`. use sc_service::client; use sp_runtime::BuildStorage; /// Re-export test-client utilities. pub use substrate_test_client::*; -/// Call executor for `node-runtime` `TestClient`. +/// Call executor for `kitchensink-runtime` `TestClient`. pub type ExecutorDispatch = sc_executor::NativeElseWasmExecutor; /// Default backend type. @@ -34,10 +34,10 @@ pub type Client = client::Client< Backend, client::LocalCallExecutor, node_primitives::Block, - node_runtime::RuntimeApi, + kitchensink_runtime::RuntimeApi, >; -/// Transaction for node-runtime. +/// Transaction for kitchensink-runtime. pub type Transaction = sc_client_api::backend::TransactionFor; /// Genesis configuration parameters for `TestClient`. diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index fbd28c5af0298..1eb7318db52da 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -19,7 +19,7 @@ //! Genesis Configuration. use crate::keyring::*; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AccountId, BabeConfig, BalancesConfig, GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG, diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 41dd28bb89988..88d9dc56b0532 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -19,8 +19,8 @@ //! Test accounts. 
use codec::Encode; +use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; use node_primitives::{AccountId, Balance, Index}; -use node_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; use sp_runtime::generic::Era; diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index d85c6dbe9d048..271388549bcf9 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Subkey utility, based on node_runtime. +//! Subkey utility, based on kitchensink_runtime. fn main() -> Result<(), sc_cli::Error> { subkey::run() diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index 79d6fca6f91b6..e81d1b79e74ed 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -314,27 +314,50 @@ impl IndexMut for FreeLists { } } +/// Memory allocation stats gathered during the lifetime of the allocator. +#[derive(Clone, Debug, Default)] +#[non_exhaustive] +pub struct AllocationStats { + /// The current number of bytes allocated. + /// + /// This represents how many bytes are allocated *right now*. + pub bytes_allocated: u32, + + /// The peak number of bytes ever allocated. + /// + /// This is the maximum the `bytes_allocated` ever reached. + pub bytes_allocated_peak: u32, + + /// The sum of every allocation ever made. + /// + /// This increases every time a new allocation is made. + pub bytes_allocated_sum: u128, + + /// The amount of address space (in bytes) used by the allocator. + /// + /// This is calculated as the difference between the allocator's bumper + /// and the heap base. + /// + /// Currently the bumper's only ever incremented, so this is simultaneously + /// the current value as well as the peak value. + pub address_space_used: u32, +} + /// An implementation of freeing bump allocator. /// /// Refer to the module-level documentation for further details. pub struct FreeingBumpHeapAllocator { + original_heap_base: u32, bumper: u32, free_lists: FreeLists, - total_size: u32, poisoned: bool, - max_total_size: u32, - max_bumper: u32, last_observed_memory_size: u32, + stats: AllocationStats, } impl Drop for FreeingBumpHeapAllocator { fn drop(&mut self) { - log::debug!( - target: LOG_TARGET, - "allocator being destroyed, max_total_size {}, max_bumper {}", - self.max_total_size, - self.max_bumper, - ) + log::debug!(target: LOG_TARGET, "allocator dropped: {:?}", self.stats) } } @@ -348,13 +371,12 @@ impl FreeingBumpHeapAllocator { let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT; FreeingBumpHeapAllocator { + original_heap_base: aligned_heap_base, bumper: aligned_heap_base, free_lists: FreeLists::new(), - total_size: 0, poisoned: false, - max_total_size: 0, - max_bumper: aligned_heap_base, last_observed_memory_size: 0, + stats: AllocationStats::default(), } } @@ -412,22 +434,13 @@ impl FreeingBumpHeapAllocator { // Write the order in the occupied header. 
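// The bookkeeping below goes through the new `AllocationStats` introduced above
// (`bytes_allocated`, `bytes_allocated_peak`, `bytes_allocated_sum`,
// `address_space_used`) rather than the old `total_size`/`max_*` counters;
// callers can snapshot the numbers at any time via `stats()`.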
Header::Occupied(order).write_into(mem, header_ptr)?; - self.total_size += order.size() + HEADER_SIZE; - - log::trace!( - target: LOG_TARGET, - "after allocation, total_size = {}, bumper = {}.", - self.total_size, - self.bumper, - ); + self.stats.bytes_allocated += order.size() + HEADER_SIZE; + self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE); + self.stats.bytes_allocated_peak = + std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated); + self.stats.address_space_used = self.bumper - self.original_heap_base; - // update trackers if needed. - if self.total_size > self.max_total_size { - self.max_total_size = self.total_size; - } - if self.bumper > self.max_bumper { - self.max_bumper = self.bumper; - } + log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats); bomb.disarm(); Ok(Pointer::new(header_ptr + HEADER_SIZE)) @@ -469,21 +482,23 @@ impl FreeingBumpHeapAllocator { let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr)); Header::Free(prev_head).write_into(mem, header_ptr)?; - // Do the total_size book keeping. - self.total_size = self - .total_size + self.stats.bytes_allocated = self + .stats + .bytes_allocated .checked_sub(order.size() + HEADER_SIZE) - .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; - log::trace!( - "after deallocation, total_size = {}, bumper = {}.", - self.total_size, - self.bumper, - ); + .ok_or_else(|| error("underflow of the currently allocated bytes count"))?; + + log::trace!("after deallocation: {:?}", self.stats); bomb.disarm(); Ok(()) } + /// Returns the allocation stats for this allocator. + pub fn stats(&self) -> AllocationStats { + self.stats.clone() + } + /// Increases the `bumper` by `size`. /// /// Returns the `bumper` from before the increase. 
Returns an `Error::AllocatorOutOfSpace` if @@ -791,13 +806,13 @@ mod tests { let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); assert_eq!(ptr1, to_pointer(HEADER_SIZE)); heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1"); - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); assert_eq!(heap.bumper, 40); let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); assert_eq!(ptr2, to_pointer(48)); heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2"); - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); assert_eq!(heap.bumper, 64); // when @@ -825,7 +840,7 @@ mod tests { heap.allocate(&mut mem[..], 9).unwrap(); // then - assert_eq!(heap.total_size, HEADER_SIZE + 16); + assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16); } #[test] @@ -840,7 +855,7 @@ mod tests { heap.deallocate(&mut mem[..], ptr).unwrap(); // then - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); } #[test] @@ -856,7 +871,7 @@ mod tests { } // then - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); } #[test] diff --git a/client/allocator/src/lib.rs b/client/allocator/src/lib.rs index 2c3e3b2ae841d..2fe63a1ec392c 100644 --- a/client/allocator/src/lib.rs +++ b/client/allocator/src/lib.rs @@ -26,4 +26,4 @@ mod error; mod freeing_bump; pub use error::Error; -pub use freeing_bump::FreeingBumpHeapAllocator; +pub use freeing_bump::{AllocationStats, FreeingBumpHeapAllocator}; diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index c8e9dc7482823..8cb3ad565afb0 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -21,7 +21,7 @@ fnv = "1.0.6" futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index af8552886b72e..bcc7a9bff3b2d 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -32,7 +32,8 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, + backend::AsTrieBackend, ChildStorageCollection, IndexOperation, OffchainChangesCollection, + StorageCollection, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; use std::collections::{HashMap, HashSet}; @@ -448,7 +449,12 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. - type State: StateBackend> + Send; + type State: StateBackend> + + Send + + AsTrieBackend< + HashFor, + TrieBackendStorage = >>::TrieBackendStorage, + >; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; @@ -507,7 +513,8 @@ pub trait Backend: AuxStore + Send + Sync { /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an - /// inconsistent state. + /// inconsistent state. All blocks higher than the best block are also reverted and not counting + /// towards `n`. 
/// /// Returns the number of blocks that were successfully reverted and the list of finalized /// blocks that has been reverted. diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index a8a7442a8ef9f..9000f62aa6cc3 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -173,7 +173,7 @@ impl Blockchain { { let mut storage = self.storage.write(); - storage.leaves.import(hash, number, header.parent_hash().clone()); + storage.leaves.import(hash, number, *header.parent_hash()); storage.blocks.insert(hash, StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 5859290777433..cdcb80a110b74 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -32,32 +32,36 @@ struct LeafSetItem { number: Reverse, } -/// A displaced leaf after import. -#[must_use = "Displaced items from the leaf set must be handled."] -pub struct ImportDisplaced { - new_hash: H, - displaced: LeafSetItem, +/// Inserted and removed leaves after an import action. +pub struct ImportOutcome { + inserted: LeafSetItem, + removed: Option, } -/// Displaced leaves after finalization. -#[must_use = "Displaced items from the leaf set must be handled."] -pub struct FinalizationDisplaced { - leaves: BTreeMap, Vec>, +/// Inserted and removed leaves after a remove action. +pub struct RemoveOutcome { + inserted: Option, + removed: LeafSetItem, } -impl FinalizationDisplaced { +/// Removed leaves after a finalization action. +pub struct FinalizationOutcome { + removed: BTreeMap, Vec>, +} + +impl FinalizationOutcome { /// Merge with another. This should only be used for displaced items that /// are produced within one transaction of each other. pub fn merge(&mut self, mut other: Self) { // this will ignore keys that are in duplicate, however // if these are actually produced correctly via the leaf-set within // one transaction, then there will be no overlap in the keys. - self.leaves.append(&mut other.leaves); + self.removed.append(&mut other.removed); } /// Iterate over all displaced leaves. pub fn leaves(&self) -> impl Iterator { - self.leaves.values().flatten() + self.removed.values().flatten() } } @@ -67,8 +71,6 @@ impl FinalizationDisplaced { #[derive(Debug, Clone, PartialEq, Eq)] pub struct LeafSet { storage: BTreeMap, Vec>, - pending_added: Vec<(H, N)>, - pending_removed: Vec, } impl LeafSet @@ -78,7 +80,7 @@ where { /// Construct a new, blank leaf set. pub fn new() -> Self { - Self { storage: BTreeMap::new(), pending_added: Vec::new(), pending_removed: Vec::new() } + Self { storage: BTreeMap::new() } } /// Read the leaf list from the DB, using given prefix for keys. @@ -97,32 +99,56 @@ where }, None => {}, } - Ok(Self { storage, pending_added: Vec::new(), pending_removed: Vec::new() }) + Ok(Self { storage }) } - /// update the leaf list on import. returns a displaced leaf if there was one. - pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { - // avoid underflow for genesis. - let displaced = if number != N::zero() { - let new_number = Reverse(number.clone() - N::one()); - let was_displaced = self.remove_leaf(&new_number, &parent_hash); - - if was_displaced { - self.pending_removed.push(parent_hash.clone()); - Some(ImportDisplaced { - new_hash: hash.clone(), - displaced: LeafSetItem { hash: parent_hash, number: new_number }, - }) - } else { - None - } + /// Update the leaf list on import. 
+ pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> ImportOutcome { + let number = Reverse(number); + + let removed = if number.0 != N::zero() { + let parent_number = Reverse(number.0.clone() - N::one()); + self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash) } else { None }; - self.insert_leaf(Reverse(number.clone()), hash.clone()); - self.pending_added.push((hash, number)); - displaced + self.insert_leaf(number.clone(), hash.clone()); + + ImportOutcome { inserted: LeafSetItem { hash, number }, removed } + } + + /// Update the leaf list on removal. + /// + /// Note that the leaves set structure doesn't have the information to decide if the + /// leaf we're removing is the last child of the parent. It follows that this method requires + /// the caller to check this condition and optionally pass the `parent_hash` if `hash` is + /// its last child. + /// + /// Returns `None` if no modifications are applied. + pub fn remove( + &mut self, + hash: H, + number: N, + parent_hash: Option, + ) -> Option> { + let number = Reverse(number); + + if !self.remove_leaf(&number, &hash) { + return None + } + + let inserted = parent_hash.and_then(|parent_hash| { + if number.0 != N::zero() { + let parent_number = Reverse(number.0.clone() - N::one()); + self.insert_leaf(parent_number, parent_hash.clone()); + Some(parent_hash) + } else { + None + } + }); + + Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } }) } /// Note a block height finalized, displacing all leaves with number less than the finalized @@ -132,17 +158,15 @@ /// same number as the finalized block, but with different hashes, the current behavior /// is simpler and our assumptions about how finalization works means that those leaves /// will be pruned soon afterwards anyway. - pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { + pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed - .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); - FinalizationDisplaced { leaves: below_boundary } + FinalizationOutcome { removed: below_boundary } } /// The same as [`Self::finalize_height`], but it only simulates the operation. @@ -150,16 +174,16 @@ /// This means that no changes are done. /// /// Returns the leaves that would be displaced by finalizing the given block. - pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationDisplaced { + pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.range(&Reverse(boundary)..); - FinalizationDisplaced { - leaves: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), + FinalizationOutcome { + removed: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), } } @@ -188,8 +212,6 @@ where self.remove_leaf(number, hash), "item comes from an iterator over storage; qed", ); - - self.pending_removed.push(hash.clone()); } } @@ -203,7 +225,6 @@ where // this is an invariant of regular block import.
if !leaves_contains_best { self.insert_leaf(best_number.clone(), best_hash.clone()); - self.pending_added.push((best_hash, best_number.0)); } } @@ -213,9 +234,9 @@ where self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect() } - /// Number of known leaves + /// Number of known leaves. pub fn count(&self) -> usize { - self.storage.len() + self.storage.values().map(|level| level.len()).sum() } /// Write the leaf list to the database transaction. @@ -227,8 +248,6 @@ where ) { let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); - self.pending_added.clear(); - self.pending_removed.clear(); } /// Check if given block is a leaf. @@ -242,7 +261,7 @@ where self.storage.entry(number).or_insert_with(Vec::new).push(hash); } - // returns true if this leaf was contained, false otherwise. + // Returns true if this leaf was contained, false otherwise. fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { let mut empty = false; let removed = self.storage.get_mut(number).map_or(false, |leaves| { @@ -269,6 +288,11 @@ where removed } + + /// Returns the highest leaf and all hashes associated with it. + pub fn highest_leaf(&self) -> Option<(N, &[H])> { + self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) + } } /// Helper for undoing operations. @@ -281,23 +305,30 @@ where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { - /// Undo an imported block by providing the displaced leaf. - pub fn undo_import(&mut self, displaced: ImportDisplaced) { - let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); - self.inner.remove_leaf(&new_number, &displaced.new_hash); - self.inner.insert_leaf(new_number, displaced.displaced.hash); + /// Undo an imported block by providing the import operation outcome. + /// No additional operations should be performed between import and undo. + pub fn undo_import(&mut self, outcome: ImportOutcome) { + if let Some(removed_hash) = outcome.removed { + let removed_number = Reverse(outcome.inserted.number.0.clone() - N::one()); + self.inner.insert_leaf(removed_number, removed_hash); + } + self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash); } - /// Undo a finalization operation by providing the displaced leaves. - pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { - self.inner.storage.append(&mut displaced.leaves); + /// Undo a removed block by providing the displaced leaf. + /// No additional operations should be performed between remove and undo. + pub fn undo_remove(&mut self, outcome: RemoveOutcome) { + if let Some(inserted_hash) = outcome.inserted { + let inserted_number = Reverse(outcome.removed.number.0.clone() - N::one()); + self.inner.remove_leaf(&inserted_number, &inserted_hash); + } + self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash); } -} -impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> { - fn drop(&mut self) { - self.inner.pending_added.clear(); - self.inner.pending_removed.clear(); + /// Undo a finalization operation by providing the displaced leaves. + /// No additional operations should be performed between finalization and undo.
+ pub fn undo_finalization(&mut self, mut outcome: FinalizationOutcome) { + self.inner.storage.append(&mut outcome.removed); } } @@ -307,7 +338,7 @@ mod tests { use std::sync::Arc; #[test] - fn it_works() { + fn import_works() { let mut set = LeafSet::new(); set.import(0u32, 0u32, 0u32); @@ -315,15 +346,104 @@ mod tests { set.import(2_1, 2, 1_1); set.import(3_1, 3, 2_1); + assert_eq!(set.count(), 1); assert!(set.contains(3, 3_1)); assert!(!set.contains(2, 2_1)); assert!(!set.contains(1, 1_1)); assert!(!set.contains(0, 0)); set.import(2_2, 2, 1_1); + set.import(1_2, 1, 0); + set.import(2_3, 2, 1_2); + + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); + + // Finally test the undo feature + + let outcome = set.import(2_4, 2, 1_1); + assert_eq!(outcome.inserted.hash, 2_4); + assert_eq!(outcome.removed, None); + assert_eq!(set.count(), 4); + assert!(set.contains(2, 2_4)); + + set.undo().undo_import(outcome); + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); + let outcome = set.import(3_2, 3, 2_3); + assert_eq!(outcome.inserted.hash, 3_2); + assert_eq!(outcome.removed, Some(2_3)); + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_2)); + + set.undo().undo_import(outcome); + assert_eq!(set.count(), 3); assert!(set.contains(3, 3_1)); assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); + } + + #[test] + fn removal_works() { + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_1); + + let outcome = set.remove(12_1, 12, Some(11_1)).unwrap(); + assert_eq!(outcome.removed.hash, 12_1); + assert_eq!(outcome.inserted, Some(11_1)); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + + let outcome = set.remove(11_1, 11, None).unwrap(); + assert_eq!(outcome.removed.hash, 11_1); + assert_eq!(outcome.inserted, None); + assert_eq!(set.count(), 1); + assert!(set.contains(11, 11_2)); + + let outcome = set.remove(11_2, 11, Some(10_1)).unwrap(); + assert_eq!(outcome.removed.hash, 11_2); + assert_eq!(outcome.inserted, Some(10_1)); + assert_eq!(set.count(), 1); + assert!(set.contains(10, 10_1)); + + set.undo().undo_remove(outcome); + assert_eq!(set.count(), 1); + assert!(set.contains(11, 11_2)); + } + + #[test] + fn finalization_works() { + let mut set = LeafSet::new(); + set.import(9_1u32, 9u32, 0u32); + set.import(10_1, 10, 9_1); + set.import(10_2, 10, 9_1); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_2); + + let outcome = set.finalize_height(11); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert_eq!( + outcome.removed, + [(Reverse(10), vec![10_2])].into_iter().collect::>(), + ); + + set.undo().undo_finalization(outcome); + assert_eq!(set.count(), 3); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert!(set.contains(10, 10_2)); } #[test] @@ -390,19 +510,4 @@ mod tests { let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); } - - #[test] - fn undo_finalization() { - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - let displaced = set.finalize_height(11); - assert!(!set.contains(10, 10_1)); - - 
set.undo().undo_finalization(displaced); - assert!(set.contains(10, 10_1)); - } } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 32a5e23bd49d9..b7a140f24a111 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,19 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -async-trait = "0.1" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.45.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" prost = "0.10" rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index bce39069ef7c7..285a2714b81f5 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -57,7 +57,7 @@ pub enum Error { ParsingMultiaddress(#[from] libp2p::core::multiaddr::Error), #[error("Failed to parse a libp2p key.")] - ParsingLibp2pIdentity(#[from] sc_network::DecodingError), + ParsingLibp2pIdentity(#[from] libp2p::identity::error::DecodingError), #[error("Failed to sign using a specific public key.")] MissingSignature(CryptoTypePublicPair), diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 8522da9984a6f..f0ef374551617 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -39,8 +39,9 @@ use futures::{ Stream, }; +use libp2p::{Multiaddr, PeerId}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sc_network_common::protocol::event::DhtEvent; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index c240e5d0c2287..df09b6ea43216 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -25,7 +25,7 @@ use futures::{ SinkExt, }; -use sc_network::{Multiaddr, PeerId}; +use libp2p::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; /// Service to interact with the [`crate::Worker`]. 
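The authority-discovery changes above drop the crate's reliance on `sc_network` re-exports: libp2p types are imported straight from `libp2p`, and shared network abstractions come from the new `sc-network-common` crate. A minimal sketch of the resulting import pattern for downstream code follows; the two `DhtEvent` variants matched here are the ones exercised later in this diff, and the wildcard arm hedges any others:

use libp2p::{Multiaddr, PeerId};
use sc_network_common::protocol::event::DhtEvent;

// With the re-exports gone, event handling only needs sc-network-common.
fn describe(event: &DhtEvent) -> &'static str {
    match event {
        DhtEvent::ValueFound(_) => "value found",
        DhtEvent::ValueNotFound(_) => "value not found",
        _ => "other DHT event",
    }
}

// The libp2p addressing types are used directly, e.g. for logging.
fn peer_label(peer: &PeerId, addr: &Multiaddr) -> String {
    format!("{peer}@{addr}")
}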
diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index e9f94b6a186db..334b2638ca58c 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -87,12 +87,12 @@ fn get_addresses_and_authority_id() { fn cryptos_are_compatible() { use sp_core::crypto::Pair; - let libp2p_secret = sc_network::Keypair::generate_ed25519(); + let libp2p_secret = libp2p::identity::Keypair::generate_ed25519(); let libp2p_public = libp2p_secret.public(); let sp_core_secret = { let libp2p_ed_secret = match libp2p_secret.clone() { - sc_network::Keypair::Ed25519(x) => x, + libp2p::identity::Keypair::Ed25519(x) => x, _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), }; sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 87cc72ba7a69c..f13a1cf33581b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -32,19 +32,22 @@ use std::{ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; -use async_trait::async_trait; use codec::Decode; use ip_network::IpNetwork; use libp2p::{ core::multiaddr, multihash::{Multihash, MultihashDigest}, + Multiaddr, PeerId, }; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sc_network_common::{ + protocol::event::DhtEvent, + service::{KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature}, +}; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, @@ -136,7 +139,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, /// Set of in-flight lookups. - in_flight_lookups: HashMap, + in_flight_lookups: HashMap, addr_cache: addr_cache::AddrCache, @@ -464,10 +467,7 @@ where } } - fn handle_dht_value_found_event( - &mut self, - values: Vec<(sc_network::KademliaKey, Vec)>, - ) -> Result<()> { + fn handle_dht_value_found_event(&mut self, values: Vec<(KademliaKey, Vec)>) -> Result<()> { // Ensure `values` is not empty and all its keys equal. let remote_key = single(values.iter().map(|(key, _)| key.clone())) .map_err(|_| Error::ReceivingDhtValueFoundEventWithDifferentKeys)? 
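The `handle_dht_value_found_event` hunk above leans on a small `single` helper (matching the "Makes sure all values are the same and returns it" comment further down) to reject DHT records whose key/value pairs carry more than one distinct key. A self-contained reconstruction of what such a helper can look like, hypothetical in its exact signature:

// Returns Ok(Some(v)) when all items are equal, Ok(None) for an empty
// iterator, and Err(()) as soon as two differing items are seen.
fn single<T: PartialEq>(values: impl IntoIterator<Item = T>) -> Result<Option<T>, ()> {
    values.into_iter().try_fold(None, |acc, item| match acc {
        None => Ok(Some(item)),
        Some(prev) if prev == item => Ok(Some(prev)),
        Some(_) => Err(()),
    })
}

The caller above then maps the error case onto `Error::ReceivingDhtValueFoundEventWithDifferentKeys`.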
@@ -523,11 +523,11 @@ where // properly signed by the owner of the PeerId if let Some(peer_signature) = peer_signature { - let public_key = - sc_network::PublicKey::from_protobuf_encoding(&peer_signature.public_key) - .map_err(Error::ParsingLibp2pIdentity)?; - let signature = - sc_network::Signature { public_key, bytes: peer_signature.signature }; + let public_key = libp2p::identity::PublicKey::from_protobuf_encoding( + &peer_signature.public_key, + ) + .map_err(Error::ParsingLibp2pIdentity)?; + let signature = Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { return Err(Error::VerifyingDhtPayload) @@ -590,55 +590,15 @@ where } } -pub trait NetworkSigner { - /// Sign a message in the name of `self.local_peer_id()` - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result; -} - /// NetworkProvider provides [`Worker`] with all necessary hooks into the /// underlying Substrate networking. Using this trait abstraction instead of -/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. -#[async_trait] -pub trait NetworkProvider: NetworkStateInfo + NetworkSigner { - /// Start putting a value in the Dht. - fn put_value(&self, key: sc_network::KademliaKey, value: Vec); - - /// Start getting a value from the Dht. - fn get_value(&self, key: &sc_network::KademliaKey); -} +/// `sc_network::NetworkService` directly is necessary to unit test [`Worker`]. +pub trait NetworkProvider: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -impl NetworkSigner for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result { - self.sign_with_local_identity(msg) - } -} - -#[async_trait::async_trait] -impl NetworkProvider for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { - self.put_value(key, value) - } - fn get_value(&self, key: &sc_network::KademliaKey) { - self.get_value(key) - } -} +impl NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -fn hash_authority_id(id: &[u8]) -> sc_network::KademliaKey { - sc_network::KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) +fn hash_authority_id(id: &[u8]) -> KademliaKey { + KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) } // Makes sure all values are the same and returns it @@ -685,7 +645,7 @@ async fn sign_record_with_authority_ids( peer_signature: Option, key_store: &dyn CryptoStore, keys: Vec, -) -> Result)>> { +) -> Result)>> { let signatures = key_store .sign_with_all(key_types::AUTHORITY_DISCOVERY, keys.clone(), &serialized_record) .await diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index f768b9c4e66a7..19bbbf0b62e7e 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -16,9 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use libp2p::core::multiaddr::{Multiaddr, Protocol}; - -use sc_network::PeerId; +use libp2p::{ + core::multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; diff --git a/client/authority-discovery/src/worker/schema/tests.rs b/client/authority-discovery/src/worker/schema/tests.rs index b85a4ce37447d..60147d6762e50 100644 --- a/client/authority-discovery/src/worker/schema/tests.rs +++ b/client/authority-discovery/src/worker/schema/tests.rs @@ -21,9 +21,8 @@ mod schema_v1 { } use super::*; -use libp2p::multiaddr::Multiaddr; +use libp2p::{multiaddr::Multiaddr, PeerId}; use prost::Message; -use sc_network::PeerId; #[test] fn v2_decodes_v1() { @@ -56,7 +55,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = sc_network::Keypair::generate_ed25519(); + let peer_secret = libp2p::identity::Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index a1a699bc30dd2..7b0ee45833e19 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -22,7 +22,6 @@ use std::{ task::Poll, }; -use async_trait::async_trait; use futures::{ channel::mpsc::{self, channel}, executor::{block_on, LocalPool}, @@ -30,9 +29,10 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, PeerId}; +use libp2p::{core::multiaddr, identity::Keypair, PeerId}; use prometheus_endpoint::prometheus::default_registry; +use sc_network_common::service::{KademliaKey, Signature, SigningError}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::KeyStore, CryptoStore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -48,7 +48,7 @@ pub(crate) struct TestApi { impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { RuntimeApi { authorities: self.authorities.clone() }.into() } } @@ -111,18 +111,18 @@ sp_api::mock_impl_runtime_apis! { #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(sc_network::KademliaKey), - PutCalled(sc_network::KademliaKey, Vec), + GetCalled(KademliaKey), + PutCalled(KademliaKey, Vec), } pub struct TestNetwork { peer_id: PeerId, - identity: sc_network::Keypair, + identity: Keypair, external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. 
- pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -136,7 +136,7 @@ impl TestNetwork { impl Default for TestNetwork { fn default() -> Self { let (tx, rx) = mpsc::unbounded(); - let identity = sc_network::Keypair::generate_ed25519(); + let identity = Keypair::generate_ed25519(); TestNetwork { peer_id: identity.public().to_peer_id(), identity, @@ -153,21 +153,20 @@ impl NetworkSigner for TestNetwork { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, &self.identity) + ) -> std::result::Result { + Signature::sign_message(msg, &self.identity) } } -#[async_trait] -impl NetworkProvider for TestNetwork { - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { +impl NetworkDHTProvider for TestNetwork { + fn put_value(&self, key: KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); self.event_sender .clone() .unbounded_send(TestNetworkEvent::PutCalled(key, value)) .unwrap(); } - fn get_value(&self, key: &sc_network::KademliaKey) { + fn get_value(&self, key: &KademliaKey) { self.get_value_call.lock().unwrap().push(key.clone()); self.event_sender .clone() @@ -186,12 +185,16 @@ impl NetworkStateInfo for TestNetwork { } } -impl NetworkSigner for sc_network::Keypair { +struct TestSigner<'a> { + keypair: &'a Keypair, +} + +impl<'a> NetworkSigner for TestSigner<'a> { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, self) + ) -> std::result::Result { + Signature::sign_message(msg, self.keypair) } } @@ -200,7 +203,7 @@ async fn build_dht_event( public_key: AuthorityId, key_store: &dyn CryptoStore, network: Option<&Signer>, -) -> Vec<(sc_network::KademliaKey, Vec)> { +) -> Vec<(KademliaKey, Vec)> { let serialized_record = serialize_authority_record(serialize_addresses(addresses.into_iter())).unwrap(); @@ -313,7 +316,7 @@ fn publish_discover_cycle() { let dht_event = { let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) + DhtEvent::ValueFound(vec![(key, value)]) }; // Node B discovering node A's address. 
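Note how `TestNetwork` now only implements the component traits; it picks up `NetworkProvider` for free through the blanket implementation introduced in the worker.rs hunk earlier (`impl NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner`). The pattern in miniature, with stand-in traits so the sketch compiles on its own:

trait Dht {
    fn get_value(&self, key: &str);
}
trait StateInfo {
    fn local_peer(&self) -> String;
}

// Umbrella trait plus blanket impl: any type providing the component
// traits automatically satisfies Provider, so test doubles never need
// an explicit impl of the umbrella trait itself.
trait Provider: Dht + StateInfo {}
impl<T: Dht + StateInfo> Provider for T {}

struct Mock;
impl Dht for Mock {
    fn get_value(&self, _key: &str) {}
}
impl StateInfo for Mock {
    fn local_peer(&self) -> String {
        "mock".into()
    }
}

fn run(provider: &impl Provider) {
    provider.get_value("authority-record");
    let _ = provider.local_peer();
}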
@@ -469,7 +472,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -487,7 +490,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { struct DhtValueFoundTester { pub remote_key_store: KeyStore, pub remote_authority_public: sp_core::sr25519::Public, - pub remote_node_key: sc_network::Keypair, + pub remote_node_key: Keypair, pub local_worker: Option< Worker< TestApi, @@ -496,7 +499,7 @@ struct DhtValueFoundTester { sp_runtime::generic::Header, substrate_test_runtime_client::runtime::Extrinsic, >, - std::pin::Pin>>, + std::pin::Pin>>, >, >, } @@ -508,7 +511,7 @@ impl DhtValueFoundTester { block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) .unwrap(); - let remote_node_key = sc_network::Keypair::generate_ed25519(); + let remote_node_key = Keypair::generate_ed25519(); Self { remote_key_store, remote_authority_public, remote_node_key, local_worker: None } } @@ -523,11 +526,11 @@ impl DhtValueFoundTester { fn process_value_found( &mut self, strict_record_validation: bool, - values: Vec<(sc_network::KademliaKey, Vec)>, + values: Vec<(KademliaKey, Vec)>, ) -> Option<&HashSet> { let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = - Arc::new(TestApi { authorities: vec![self.remote_authority_public.clone().into()] }); + Arc::new(TestApi { authorities: vec![self.remote_authority_public.into()] }); let local_network: Arc = Arc::new(Default::default()); let local_key_store = KeyStore::new(); @@ -552,8 +555,7 @@ impl DhtValueFoundTester { self.local_worker .as_ref() .map(|w| { - w.addr_cache - .get_addresses_by_authority_id(&self.remote_authority_public.clone().into()) + w.addr_cache.get_addresses_by_authority_id(&self.remote_authority_public.into()) }) .unwrap() } @@ -566,7 +568,7 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { let addresses = (1..100).map(|i| tester.multiaddr_with_peer_id(i)).collect(); let kv_pairs = block_on(build_dht_event::( addresses, - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, None, )); @@ -581,9 +583,9 @@ fn strict_accept_address_with_peer_signature() { let addr = tester.multiaddr_with_peer_id(1); let kv_pairs = block_on(build_dht_event( vec![addr.clone()], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -598,12 +600,12 @@ fn strict_accept_address_with_peer_signature() { #[test] fn reject_address_with_rogue_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let rogue_remote_node_key = sc_network::Keypair::generate_ed25519(); + let rogue_remote_node_key = Keypair::generate_ed25519(); let kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, - Some(&rogue_remote_node_key), + Some(&TestSigner { keypair: &rogue_remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -619,9 +621,9 @@ fn reject_address_with_invalid_peer_signature() { let mut tester = DhtValueFoundTester::new(); let mut kv_pairs = block_on(build_dht_event( 
vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); // tamper with the signature let mut record = schema::SignedAuthorityRecord::decode(kv_pairs[0].1.as_slice()).unwrap(); @@ -641,7 +643,7 @@ fn reject_address_without_peer_signature() { let mut tester = DhtValueFoundTester::new(); let kv_pairs = block_on(build_dht_event::( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, None, )); @@ -659,7 +661,7 @@ fn do_not_cache_addresses_without_peer_id() { "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); let kv_pairs = block_on(build_dht_event::( vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, None, )); @@ -808,7 +810,7 @@ fn lookup_throttling() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -822,7 +824,7 @@ fn lookup_throttling() { // Make second one fail. let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + let dht_event = DhtEvent::ValueNotFound(remote_hash); dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index a2ccba486ae29..43493ada051f8 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -31,6 +31,6 @@ sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index b67008bc6f44b..f5ccd9023a3db 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -34,9 +34,7 @@ use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; -use sp_consensus::{ - evaluation, DisableProofRecording, EnableProofRecording, ProofRecording, Proposal, -}; +use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ @@ -205,7 +203,6 @@ where let proposer = Proposer::<_, _, _, _, PR> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), - parent_hash, parent_id: id, parent_number: *parent_header.number(), transaction_pool: self.transaction_pool.clone(), @@ -250,7 +247,6 @@ where pub struct Proposer { spawn_handle: Box, client: Arc, - parent_hash: ::Hash, parent_id: BlockId, parent_number: <::Header as HeaderT>::Number, transaction_pool: Arc, @@ 
-536,12 +532,6 @@ where "hash" => ?::Hash::from(block.header().hash()), ); - if let Err(err) = - evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) - { - error!("Failed to evaluate authored block: {:?}", err); - } - let proof = PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; @@ -865,10 +855,18 @@ mod tests { .expect("header get error") .expect("there should be header"); - let extrinsics_num = 4; - let extrinsics = (0..extrinsics_num) - .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) - .collect::>(); + let extrinsics_num = 5; + let extrinsics = std::iter::once( + Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 100, + nonce: 0, + } + .into_signed_tx(), + ) + .chain((0..extrinsics_num - 1).map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))) + .collect::>(); let block_limit = genesis_header.encoded_size() + extrinsics @@ -932,8 +930,9 @@ mod tests { .unwrap(); // The block limit didn't change, but we now include the proof in the estimation of the - // block size and thus, one less transaction should fit into the limit. - assert_eq!(block.extrinsics().len(), extrinsics_num - 2); + // block size and thus, only the `Transfer` will fit into the block. It reads more data + // than we have reserved in the block limit. + assert_eq!(block.extrinsics().len(), 1); } #[test] diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index bd25496f2dfea..5cc2952e26ba5 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -9,19 +9,21 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3" futures-timer = "3.0.1" hex = "0.4.2" log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0" wasm-timer = "0.2.5" beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } prometheus = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-chain-spec = { version = "4.0.0-dev", path = "../../client/chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } @@ -39,10 +41,10 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] serde = "1.0.136" -strum = { version = "0.23", features = ["derive"] } +strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" tokio = "1.17.0" -sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network-test = { version = "0.8.0", path = "../network/test" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } sp-keyring = { version = "6.0.0", path = "../../primitives/keyring" } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index 7d31aea3f6971..3ccf83c1f5106 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -11,9 +11,9 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = 
"0.3.21" -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" beefy-gadget = { version = "4.0.0-dev", path = "../." } @@ -24,7 +24,7 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" sc-rpc = { version = "4.0.0-dev", features = [ "test-helpers", ], path = "../../rpc" } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 2c3ffda056c26..3be182ceb8f39 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -30,12 +30,12 @@ use futures::{task::SpawnError, FutureExt, StreamExt}; use jsonrpsee::{ core::{async_trait, Error as JsonRpseeError, RpcResult}, proc_macros::rpc, - types::{error::CallError, ErrorObject}, - PendingSubscription, + types::{error::CallError, ErrorObject, SubscriptionResult}, + SubscriptionSink, }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}; +use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; mod notification; @@ -101,7 +101,7 @@ pub trait BeefyApi { /// Implements the BeefyApi RPC trait for interacting with BEEFY. pub struct Beefy { - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } @@ -112,7 +112,7 @@ where { /// Creates a new Beefy Rpc handler instance. pub fn new( - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -126,28 +126,28 @@ where }); executor.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); - Ok(Self { signed_commitment_stream, beefy_best_block, executor }) + Ok(Self { finality_proof_stream, beefy_best_block, executor }) } } #[async_trait] -impl BeefyApiServer for Beefy +impl BeefyApiServer + for Beefy where Block: BlockT, { - fn subscribe_justifications(&self, pending: PendingSubscription) { + fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { let stream = self - .signed_commitment_stream + .finality_proof_stream .subscribe() - .map(|sc| notification::EncodedSignedCommitment::new::(sc)); + .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); let fut = async move { - if let Some(mut sink) = pending.accept() { - sink.pipe_from_stream(stream).await; - } + sink.pipe_from_stream(stream).await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(()) } async fn latest_finalized(&self) -> RpcResult { @@ -164,31 +164,32 @@ where mod tests { use super::*; - use beefy_gadget::notification::{ - BeefyBestBlockStream, BeefySignedCommitment, BeefySignedCommitmentSender, + use beefy_gadget::{ + justification::BeefyVersionedFinalityProof, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; - use beefy_primitives::{known_payload_ids, Payload}; + use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use 
substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) { + fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefySignedCommitmentSender) { - let (commitment_sender, commitment_stream) = - BeefySignedCommitmentStream::::channel(); + ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { + let (finality_proof_sender, finality_proof_stream) = + BeefyVersionedFinalityProofStream::::channel(); let handler = - Beefy::new(commitment_stream, best_block_stream, sc_rpc::testing::test_executor()) + Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) .expect("Setting up the BEEFY RPC handler works"); - (handler.into_rpc(), commitment_sender) + (handler.into_rpc(), finality_proof_sender) } #[tokio::test] @@ -196,9 +197,9 @@ mod tests { let (rpc, _) = setup_io_handler(); let request = r#"{"jsonrpc":"2.0","method":"beefy_getFinalizedHead","params":[],"id":1}"#; let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"BEEFY RPC endpoint not ready"},"id":1}"#.to_string(); - let (result, _) = rpc.raw_json_request(&request).await.unwrap(); + let (response, _) = rpc.raw_json_request(&request).await.unwrap(); - assert_eq!(expected_response, result,); + assert_eq!(expected_response, response.result); } #[tokio::test] @@ -228,8 +229,8 @@ mod tests { let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2); while std::time::Instant::now() < deadline { let (response, _) = io.raw_json_request(request).await.expect("RPC requests work"); - if response != not_ready { - assert_eq!(response, expected); + if response.result != not_ready { + assert_eq!(response.result, expected); // Success return } @@ -259,24 +260,24 @@ mod tests { .unwrap(); let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(response, expected); + assert_eq!(response.result, expected); } - fn create_commitment() -> BeefySignedCommitment { + fn create_finality_proof() -> BeefyVersionedFinalityProof { let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); - BeefySignedCommitment:: { + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: beefy_primitives::Commitment { payload, block_number: 5, validator_set_id: 0, }, signatures: vec![], - } + }) } #[tokio::test] async fn subscribe_and_listen_to_one_justification() { - let (rpc, commitment_sender) = setup_io_handler(); + let (rpc, finality_proof_sender) = setup_io_handler(); // Subscribe let mut sub = rpc @@ -284,16 +285,16 @@ mod tests { .await .unwrap(); - // Notify with commitment - let commitment = create_commitment(); - let r: Result<(), ()> = commitment_sender.notify(|| Ok(commitment.clone())); + // Notify with finality_proof + let finality_proof = create_finality_proof(); + let r: Result<(), ()> = finality_proof_sender.notify(|| Ok(finality_proof.clone())); r.unwrap(); // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_commitment: BeefySignedCommitment = + let recv_finality_proof: BeefyVersionedFinalityProof = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); - assert_eq!(recv_commitment, commitment); + assert_eq!(recv_finality_proof, finality_proof); } } diff --git 
a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs index 2f58c7c6bb5dc..a815425644d52 100644 --- a/client/beefy/rpc/src/notification.rs +++ b/client/beefy/rpc/src/notification.rs @@ -21,19 +21,19 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::Block as BlockT; -/// An encoded signed commitment proving that the given header has been finalized. +/// An encoded finality proof proving that the given header has been finalized. /// The given bytes should be the SCALE-encoded representation of a -/// `beefy_primitives::SignedCommitment`. +/// `beefy_primitives::VersionedFinalityProof`. #[derive(Clone, Serialize, Deserialize)] -pub struct EncodedSignedCommitment(sp_core::Bytes); +pub struct EncodedVersionedFinalityProof(sp_core::Bytes); -impl EncodedSignedCommitment { +impl EncodedVersionedFinalityProof { pub fn new( - signed_commitment: beefy_gadget::notification::BeefySignedCommitment, + finality_proof: beefy_gadget::justification::BeefyVersionedFinalityProof, ) -> Self where Block: BlockT, { - EncodedSignedCommitment(signed_commitment.encode().into()) + EncodedVersionedFinalityProof(finality_proof.encode().into()) } } diff --git a/client/beefy/src/error.rs b/client/beefy/src/error.rs index eacadeb7613a5..dd5fd649d52ce 100644 --- a/client/beefy/src/error.rs +++ b/client/beefy/src/error.rs @@ -24,8 +24,12 @@ use std::fmt::Debug; #[derive(Debug, thiserror::Error, PartialEq)] pub enum Error { + #[error("Backend: {0}")] + Backend(String), #[error("Keystore error: {0}")] Keystore(String), #[error("Signature error: {0}")] Signature(String), + #[error("Session uninitialized")] + UninitSession, } diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs new file mode 100644 index 0000000000000..129484199de89 --- /dev/null +++ b/client/beefy/src/import.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use beefy_primitives::{BeefyApi, BEEFY_ENGINE_ID}; +use codec::Encode; +use log::error; +use std::{collections::HashMap, sync::Arc}; + +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::{well_known_cache_keys, HeaderBackend}; +use sp_consensus::Error as ConsensusError; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + EncodedJustification, +}; + +use sc_client_api::backend::Backend; +use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; + +use crate::{ + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + notification::BeefyVersionedFinalityProofSender, +}; + +/// A block-import handler for BEEFY. +/// +/// This scans each imported block for BEEFY justifications and verifies them. +/// Wraps an `inner: BlockImport` and ultimately defers to it.
+/// +/// When using BEEFY, the block import worker should be using this block import object. +pub struct BeefyBlockImport { + backend: Arc, + runtime: Arc, + inner: I, + justification_sender: BeefyVersionedFinalityProofSender, +} + +impl Clone for BeefyBlockImport { + fn clone(&self) -> Self { + BeefyBlockImport { + backend: self.backend.clone(), + runtime: self.runtime.clone(), + inner: self.inner.clone(), + justification_sender: self.justification_sender.clone(), + } + } +} + +impl BeefyBlockImport { + /// Create a new BeefyBlockImport. + pub fn new( + backend: Arc, + runtime: Arc, + inner: I, + justification_sender: BeefyVersionedFinalityProofSender, + ) -> BeefyBlockImport { + BeefyBlockImport { backend, runtime, inner, justification_sender } + } +} + +impl BeefyBlockImport +where + Block: BlockT, + BE: Backend, + Runtime: ProvideRuntimeApi, + Runtime::Api: BeefyApi + Send + Sync, +{ + fn decode_and_verify( + &self, + encoded: &EncodedJustification, + number: NumberFor, + hash: ::Hash, + ) -> Result, ConsensusError> { + let block_id = BlockId::hash(hash); + let validator_set = self + .runtime + .runtime_api() + .validator_set(&block_id) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| ConsensusError::ClientImport("Unknown validator set".to_string()))?; + + decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) + } + + /// Import BEEFY justification: Send it to worker for processing and also append it to backend. + /// + /// This function assumes: + /// - `justification` is verified and valid, + /// - the block referred to by `justification` has been imported _and_ finalized. + fn import_beefy_justification_unchecked( + &self, + number: NumberFor, + justification: BeefyVersionedFinalityProof, + ) { + // Append the justification to the block in the backend. + if let Err(e) = self.backend.append_justification( + BlockId::Number(number), + (BEEFY_ENGINE_ID, justification.encode()), + ) { + error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification); + } + // Send the justification to the BEEFY voter for processing. + self.justification_sender + .notify(|| Ok::<_, ()>(justification)) + .expect("forwards closure result; the closure always returns Ok; qed."); + } +} + +#[async_trait::async_trait] +impl BlockImport for BeefyBlockImport +where + Block: BlockT, + BE: Backend, + I: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync, + Runtime: ProvideRuntimeApi + Send + Sync, + Runtime::Api: BeefyApi, +{ + type Error = ConsensusError; + type Transaction = TransactionFor; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + + let beefy_proof = block + .justifications + .as_mut() + .and_then(|just| { + let decoded = just + .get(BEEFY_ENGINE_ID) + .map(|encoded| self.decode_and_verify(encoded, number, hash)); + // Remove BEEFY justification from the list before giving to `inner`; + // we will append it to backend ourselves at the end if all goes well. + just.remove(BEEFY_ENGINE_ID); + decoded + }) + .transpose() + .unwrap_or(None); + + // Run inner block import.
+ let inner_import_result = self.inner.import_block(block, new_cache).await?; + + match (beefy_proof, &inner_import_result) { + (Some(proof), ImportResult::Imported(_)) => { + let status = self.backend.blockchain().info(); + if number <= status.finalized_number && + Some(hash) == + self.backend + .blockchain() + .hash(number) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + { + // The proof is valid and the block is imported and final, so we can import it. + self.import_beefy_justification_unchecked(number, proof); + } else { + error!( + target: "beefy", + "🥩 Cannot import justification: {:?} for not-yet-final block number {:?}", + proof, + number, + ); + } + }, + _ => (), + } + + Ok(inner_import_result) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await + } +} diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs new file mode 100644 index 0000000000000..d9be18593dac7 --- /dev/null +++ b/client/beefy/src/justification.rs @@ -0,0 +1,188 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::keystore::BeefyKeystore; +use beefy_primitives::{ + crypto::{AuthorityId, Signature}, + ValidatorSet, VersionedFinalityProof, +}; +use codec::{Decode, Encode}; +use sp_consensus::Error as ConsensusError; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +/// A finality proof with matching BEEFY authorities' signatures. +pub type BeefyVersionedFinalityProof = + beefy_primitives::VersionedFinalityProof, Signature>; + +/// Decode and verify a Beefy FinalityProof. +pub(crate) fn decode_and_verify_finality_proof( + encoded: &[u8], + target_number: NumberFor, + validator_set: &ValidatorSet, +) -> Result, ConsensusError> { + let proof = >::decode(&mut &*encoded) + .map_err(|_| ConsensusError::InvalidJustification)?; + verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) +} + +/// Verify the Beefy finality proof against the validator set at the block where it was generated. +fn verify_with_validator_set( + target_number: NumberFor, + validator_set: &ValidatorSet, + proof: &BeefyVersionedFinalityProof, +) -> Result<(), ConsensusError> { + match proof { + VersionedFinalityProof::V1(signed_commitment) => { + if signed_commitment.signatures.len() != validator_set.len() || + signed_commitment.commitment.validator_set_id != validator_set.id() || + signed_commitment.commitment.block_number != target_number + { + return Err(ConsensusError::InvalidJustification) + } + + // Arrangement of signatures in the commitment should be in the same order + // as validators for that set.
+ let message = signed_commitment.commitment.encode(); + let valid_signatures = validator_set + .validators() + .into_iter() + .zip(signed_commitment.signatures.iter()) + .filter(|(id, signature)| { + signature + .as_ref() + .map(|sig| BeefyKeystore::verify(id, sig, &message[..])) + .unwrap_or(false) + }) + .count(); + if valid_signatures >= crate::round::threshold(validator_set.len()) { + Ok(()) + } else { + Err(ConsensusError::InvalidJustification) + } + }, + } +} + +#[cfg(test)] +pub(crate) mod tests { + use beefy_primitives::{ + known_payload_ids, Commitment, Payload, SignedCommitment, VersionedFinalityProof, + }; + use substrate_test_runtime_client::runtime::Block; + + use super::*; + use crate::{keystore::tests::Keyring, tests::make_beefy_ids}; + + pub(crate) fn new_finality_proof( + block_num: NumberFor, + validator_set: &ValidatorSet, + keys: &[Keyring], + ) -> BeefyVersionedFinalityProof { + let commitment = Commitment { + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + block_number: block_num, + validator_set_id: validator_set.id(), + }; + let message = commitment.encode(); + let signatures = keys.iter().map(|key| Some(key.sign(&message))).collect(); + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }) + } + + #[test] + fn should_verify_with_validator_set() { + let keys = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + + // build valid justification + let block_num = 42; + let proof = new_finality_proof(block_num, &validator_set, keys); + + let good_proof = proof.clone().into(); + // should verify successfully + verify_with_validator_set::(block_num, &validator_set, &good_proof).unwrap(); + + // wrong block number -> should fail verification + let good_proof = proof.clone().into(); + match verify_with_validator_set::(block_num + 1, &validator_set, &good_proof) { + Err(ConsensusError::InvalidJustification) => (), + _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + }; + + // wrong validator set id -> should fail verification + let good_proof = proof.clone().into(); + let other = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); + match verify_with_validator_set::(block_num, &other, &good_proof) { + Err(ConsensusError::InvalidJustification) => (), + _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + }; + + // wrong signatures length -> should fail verification + let mut bad_proof = proof.clone(); + // change length of signatures + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; + bad_signed_commitment.signatures.pop().flatten().unwrap(); + match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { + Err(ConsensusError::InvalidJustification) => (), + _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + }; + + // not enough signatures -> should fail verification + let mut bad_proof = proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; + // remove a signature (but same length) + *bad_signed_commitment.signatures.first_mut().unwrap() = None; + match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { + Err(ConsensusError::InvalidJustification) => (), + _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + }; + + // not enough _correct_ signatures -> should fail verification + let mut bad_proof = 
proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; + // change a signature to a different key + *bad_signed_commitment.signatures.first_mut().unwrap() = + Some(Keyring::Dave.sign(&bad_signed_commitment.commitment.encode())); + match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { + Err(ConsensusError::InvalidJustification) => (), + _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), + }; + } + + #[test] + fn should_decode_and_verify_finality_proof() { + let keys = &[Keyring::Alice, Keyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let block_num = 1; + + // build valid justification + let proof = new_finality_proof(block_num, &validator_set, keys); + let versioned_proof: BeefyVersionedFinalityProof = proof.into(); + let encoded = versioned_proof.encode(); + + // should successfully decode and verify + let verified = + decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); + assert_eq!(verified, versioned_proof); + } +} diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index c025ec5686ad2..bdf44e056786c 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -16,23 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::sync::Arc; - +use beefy_primitives::{BeefyApi, MmrRootHash}; use prometheus::Registry; - use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_consensus::BlockImport; use sc_network_gossip::Network as GossipNetwork; - use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; +use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::Block; - -use beefy_primitives::{BeefyApi, MmrRootHash}; - -use crate::notification::{BeefyBestBlockSender, BeefySignedCommitmentSender}; +use std::sync::Arc; mod error; mod gossip; @@ -41,11 +36,21 @@ mod metrics; mod round; mod worker; +pub mod import; +pub mod justification; pub mod notification; #[cfg(test)] mod tests; +use crate::{ + import::BeefyBlockImport, + notification::{ + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, + }, +}; + pub use beefy_protocol_name::standard_name as protocol_standard_name; pub(crate) mod beefy_protocol_name { @@ -110,6 +115,68 @@ where // empty } +/// Links between the block importer, the background voter and the RPC layer, +/// to be used by the voter. +#[derive(Clone)] +pub struct BeefyVoterLinks { + // BlockImport -> Voter links + /// Stream of BEEFY signed commitments from block import to voter. + pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, + + // Voter -> RPC links + /// Sends BEEFY signed commitments from voter to RPC. + pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, + /// Sends BEEFY best block hashes from voter to RPC. + pub to_rpc_best_block_sender: BeefyBestBlockSender, +} + +/// Links used by the BEEFY RPC layer, from the BEEFY background voter. +#[derive(Clone)] +pub struct BeefyRPCLinks { + /// Stream of signed commitments coming from the voter. + pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, + /// Stream of BEEFY best block hashes coming from the voter. 
+ pub from_voter_best_beefy_stream: BeefyBestBlockStream, +} + +/// Make block importer and link half necessary to tie the background voter to it. +pub fn beefy_block_import_and_links( + wrapped_block_import: I, + backend: Arc, + runtime: Arc, +) -> (BeefyBlockImport, BeefyVoterLinks, BeefyRPCLinks) +where + B: Block, + BE: Backend, + I: BlockImport> + + Send + + Sync, + RuntimeApi: ProvideRuntimeApi + Send + Sync, + RuntimeApi::Api: BeefyApi, +{ + // Voter -> RPC links + let (to_rpc_justif_sender, from_voter_justif_stream) = + notification::BeefyVersionedFinalityProofStream::::channel(); + let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = + notification::BeefyBestBlockStream::::channel(); + + // BlockImport -> Voter links + let (to_voter_justif_sender, from_block_import_justif_stream) = + notification::BeefyVersionedFinalityProofStream::::channel(); + + // BlockImport + let import = + BeefyBlockImport::new(backend, runtime, wrapped_block_import, to_voter_justif_sender); + let voter_links = BeefyVoterLinks { + from_block_import_justif_stream, + to_rpc_justif_sender, + to_rpc_best_block_sender, + }; + let rpc_links = BeefyRPCLinks { from_voter_best_beefy_stream, from_voter_justif_stream }; + + (import, voter_links, rpc_links) +} + /// BEEFY gadget initialization parameters. pub struct BeefyParams where @@ -130,16 +197,14 @@ where pub key_store: Option, /// Gossip network pub network: N, - /// BEEFY signed commitment sender - pub signed_commitment_sender: BeefySignedCommitmentSender, - /// BEEFY best block sender - pub beefy_best_block_sender: BeefyBestBlockSender, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry pub prometheus_registry: Option, /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. pub protocol_name: std::borrow::Cow<'static, str>, + /// Links between the block importer, the background voter and the RPC layer. + pub links: BeefyVoterLinks, } /// Start the BEEFY gadget. 
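
Aside on the wiring above: `beefy_block_import_and_links` returns three halves whose only job is to move data one way between components. The sketch below is illustrative only and is not this crate's API; plain `std::sync::mpsc` channels stand in for the notification sender/stream pairs, and `ImportHalf`, `VoterLinks`, `RpcLinks` and `FinalityProof` are hypothetical stand-ins for the block importer's sender, `BeefyVoterLinks`, `BeefyRPCLinks` and the versioned finality proof.

use std::sync::mpsc::{channel, Receiver, Sender};

struct FinalityProof; // hypothetical stand-in for a BEEFY versioned finality proof
type BlockHash = [u8; 32]; // hypothetical stand-in for a best-block hash

// Voter side: one receiver fed by block import, two senders feeding RPC.
struct VoterLinks {
    from_block_import: Receiver<FinalityProof>,
    to_rpc_justif: Sender<FinalityProof>,
    to_rpc_best_block: Sender<BlockHash>,
}

// RPC side: receivers only.
struct RpcLinks {
    from_voter_justif: Receiver<FinalityProof>,
    from_voter_best_block: Receiver<BlockHash>,
}

// Block-import side: a single sender towards the voter.
struct ImportHalf {
    to_voter_justif: Sender<FinalityProof>,
}

fn block_import_and_links() -> (ImportHalf, VoterLinks, RpcLinks) {
    let (to_rpc_justif, from_voter_justif) = channel();
    let (to_rpc_best_block, from_voter_best_block) = channel();
    let (to_voter_justif, from_block_import) = channel();
    (
        ImportHalf { to_voter_justif },
        VoterLinks { from_block_import, to_rpc_justif, to_rpc_best_block },
        RpcLinks { from_voter_justif, from_voter_best_block },
    )
}

fn main() {
    let (import, voter, rpc) = block_import_and_links();
    // A justification discovered during block import flows to the voter...
    import.to_voter_justif.send(FinalityProof).unwrap();
    let proof = voter.from_block_import.recv().unwrap();
    // ...and, once acted on, is forwarded to the RPC layer.
    voter.to_rpc_justif.send(proof).unwrap();
    assert!(rpc.from_voter_justif.recv().is_ok());
    // Best-block updates flow voter -> RPC as well.
    voter.to_rpc_best_block.send([0u8; 32]).unwrap();
    assert!(rpc.from_voter_best_block.recv().is_ok());
}

The split keeps ownership one-directional: block import holds only a sender, the voter holds the matching receiver plus the RPC-facing senders, and the RPC layer holds receivers only, mirroring the BlockImport -> Voter -> RPC flow documented on `BeefyVoterLinks`.
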
@@ -160,11 +225,10 @@ where runtime, key_store, network, - signed_commitment_sender, - beefy_best_block_sender, min_block_delta, prometheus_registry, protocol_name, + links, } = beefy_params; let sync_oracle = network.clone(); @@ -194,14 +258,13 @@ where client, backend, runtime, + sync_oracle, key_store: key_store.into(), - signed_commitment_sender, - beefy_best_block_sender, gossip_engine, gossip_validator, - min_block_delta, + links, metrics, - sync_oracle, + min_block_delta, }; let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); diff --git a/client/beefy/src/metrics.rs b/client/beefy/src/metrics.rs index a6d29dbf88abb..71e34e24c4fa0 100644 --- a/client/beefy/src/metrics.rs +++ b/client/beefy/src/metrics.rs @@ -32,8 +32,8 @@ pub(crate) struct Metrics { pub beefy_best_block: Gauge, /// Next block BEEFY should vote on pub beefy_should_vote_on: Gauge, - /// Number of sessions without a signed commitment - pub beefy_skipped_sessions: Counter, + /// Number of sessions with lagging signed commitment on mandatory block + pub beefy_lagging_sessions: Counter, } impl Metrics { @@ -65,10 +65,10 @@ impl Metrics { Gauge::new("substrate_beefy_should_vote_on", "Next block, BEEFY should vote on")?, registry, )?, - beefy_skipped_sessions: register( + beefy_lagging_sessions: register( Counter::new( - "substrate_beefy_skipped_sessions", - "Number of sessions without a signed commitment", + "substrate_beefy_lagging_sessions", + "Number of sessions with lagging signed commitment on mandatory block", )?, registry, )?, diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs index 7c18d809f6efb..c673115e487f3 100644 --- a/client/beefy/src/notification.rs +++ b/client/beefy/src/notification.rs @@ -17,11 +17,9 @@ // along with this program. If not, see . use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::Block as BlockT; -/// A commitment with matching BEEFY authorities' signatures. -pub type BeefySignedCommitment = - beefy_primitives::SignedCommitment, beefy_primitives::crypto::Signature>; +use crate::justification::BeefyVersionedFinalityProof; /// The sending half of the notifications channel(s) used to send /// notifications about best BEEFY block from the gadget side. @@ -33,13 +31,14 @@ pub type BeefyBestBlockStream = NotificationStream<::Hash, BeefyBestBlockTracingKey>; /// The sending half of the notifications channel(s) used to send notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentSender = NotificationSender>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofSender = + NotificationSender>; /// The receiving half of a notifications channel used to receive notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentStream = - NotificationStream, BeefySignedCommitmentTracingKey>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofStream = + NotificationStream, BeefyVersionedFinalityProofTracingKey>; /// Provides tracing key for BEEFY best block stream. #[derive(Clone)] @@ -48,9 +47,9 @@ impl TracingKeyStr for BeefyBestBlockTracingKey { const TRACING_KEY: &'static str = "mpsc_beefy_best_block_notification_stream"; } -/// Provides tracing key for BEEFY signed commitments stream. 
+/// Provides tracing key for BEEFY versioned finality proof stream. #[derive(Clone)] -pub struct BeefySignedCommitmentTracingKey; -impl TracingKeyStr for BeefySignedCommitmentTracingKey { - const TRACING_KEY: &'static str = "mpsc_beefy_signed_commitments_notification_stream"; +pub struct BeefyVersionedFinalityProofTracingKey; +impl TracingKeyStr for BeefyVersionedFinalityProofTracingKey { + const TRACING_KEY: &'static str = "mpsc_beefy_versioned_finality_proof_notification_stream"; } diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index fecb9557df6ea..762a8f7e5d544 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -59,7 +59,8 @@ impl RoundTracker { } } -fn threshold(authorities: usize) -> usize { +/// Minimum size of `authorities` subset that produced valid signatures for a block to finalize. +pub fn threshold(authorities: usize) -> usize { let faulty = authorities.saturating_sub(1) / 3; authorities - faulty } @@ -70,9 +71,10 @@ fn threshold(authorities: usize) -> usize { /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). pub(crate) struct Rounds { rounds: BTreeMap<(Payload, NumberFor), RoundTracker>, - best_done: Option>, session_start: NumberFor, validator_set: ValidatorSet, + mandatory_done: bool, + best_done: Option>, } impl Rounds @@ -81,15 +83,15 @@ where B: Block, { pub(crate) fn new(session_start: NumberFor, validator_set: ValidatorSet) -> Self { - Rounds { rounds: BTreeMap::new(), best_done: None, session_start, validator_set } + Rounds { + rounds: BTreeMap::new(), + session_start, + validator_set, + mandatory_done: false, + best_done: None, + } } -} -impl Rounds -where - P: Ord + Hash + Clone, - B: Block, -{ pub(crate) fn validator_set_id(&self) -> ValidatorSetId { self.validator_set.id() } @@ -98,12 +100,16 @@ where self.validator_set.validators() } - pub(crate) fn session_start(&self) -> &NumberFor { - &self.session_start + pub(crate) fn session_start(&self) -> NumberFor { + self.session_start + } + + pub(crate) fn mandatory_done(&self) -> bool { + self.mandatory_done } pub(crate) fn should_self_vote(&self, round: &(P, NumberFor)) -> bool { - Some(round.1.clone()) > self.best_done && + Some(round.1) > self.best_done && self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) } @@ -113,12 +119,9 @@ where vote: (Public, Signature), self_vote: bool, ) -> bool { - if Some(round.1.clone()) <= self.best_done { - debug!( - target: "beefy", - "🥩 received vote for old stale round {:?}, ignoring", - round.1 - ); + let num = round.1; + if num < self.session_start || Some(num) <= self.best_done { + debug!(target: "beefy", "🥩 received vote for old stale round {:?}, ignoring", num); false } else if !self.validators().iter().any(|id| vote.0 == *id) { debug!( @@ -147,6 +150,7 @@ where // remove this and older (now stale) rounds let signatures = self.rounds.remove(round)?.votes; self.rounds.retain(|&(_, number), _| number > round.1); + self.mandatory_done = self.mandatory_done || round.1 == self.session_start; self.best_done = self.best_done.max(Some(round.1)); debug!(target: "beefy", "🥩 Concluded round #{}", round.1); @@ -160,6 +164,11 @@ where None } } + + #[cfg(test)] + pub(crate) fn test_set_mandatory_done(&mut self, done: bool) { + self.mandatory_done = done; + } } #[cfg(test)] @@ -226,7 +235,7 @@ mod tests { let rounds = Rounds::::new(session_start, validators); assert_eq!(42, rounds.validator_set_id()); - assert_eq!(1, *rounds.session_start()); + assert_eq!(1, 
rounds.session_start()); assert_eq!( &vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], rounds.validators() @@ -307,6 +316,43 @@ mod tests { )); } + #[test] + fn old_rounds_not_accepted() { + sp_tracing::try_init_simple(); + + let validators = ValidatorSet::::new( + vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], + 42, + ) + .unwrap(); + let alice = (Keyring::Alice.public(), Keyring::Alice.sign(b"I am committed")); + + let session_start = 10u64.into(); + let mut rounds = Rounds::::new(session_start, validators); + + let mut vote = (H256::from_low_u64_le(1), 9); + // add vote for previous session, should fail + assert!(!rounds.add_vote(&vote, alice.clone(), true)); + // no votes present + assert!(rounds.rounds.is_empty()); + + // simulate 11 was concluded + rounds.best_done = Some(11); + // add votes for current session, but already concluded rounds, should fail + vote.1 = 10; + assert!(!rounds.add_vote(&vote, alice.clone(), true)); + vote.1 = 11; + assert!(!rounds.add_vote(&vote, alice.clone(), true)); + // no votes present + assert!(rounds.rounds.is_empty()); + + // add good vote + vote.1 = 12; + assert!(rounds.add_vote(&vote, alice, true)); + // good vote present + assert_eq!(rounds.rounds.len(), 1); + } + #[test] fn multiple_rounds() { sp_tracing::try_init_simple(); diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index b5ff27c808908..f0257d179cb33 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,14 +21,16 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{sync::Arc, task::Poll}; +use std::{collections::HashMap, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; -use sc_consensus::BoxJustificationImport; +use sc_consensus::{ + BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, + ImportedAux, +}; use sc_keystore::LocalKeystore; -use sc_network::config::ProtocolConfig; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, TestNetFactory, @@ -36,7 +38,8 @@ use sc_network_test::{ use sc_utils::notification::NotificationReceiver; use beefy_primitives::{ - crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, BEEFY_ENGINE_ID, + crypto::{AuthorityId, Signature}, + BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; use sp_mmr_primitives::{ @@ -48,19 +51,32 @@ use sp_consensus::BlockOrigin; use sp_core::H256; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - codec::Encode, generic::BlockId, traits::Header as HeaderT, BuildStorage, DigestItem, Storage, + codec::Encode, + generic::BlockId, + traits::{Header as HeaderT, NumberFor}, + BuildStorage, DigestItem, Justifications, Storage, }; use substrate_test_runtime_client::{runtime::Header, ClientExt}; -use crate::{beefy_protocol_name, keystore::tests::Keyring as BeefyKeyring, notification::*}; +use crate::{ + beefy_block_import_and_links, beefy_protocol_name, justification::*, + keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, +}; pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = 
MmrRootHash::repeat_byte(0x42); +type BeefyBlockImport = crate::BeefyBlockImport< + Block, + substrate_test_runtime_client::Backend, + two_validators::TestApi, + BlockImportAdapter>, +>; + pub(crate) type BeefyValidatorSet = ValidatorSet; -pub(crate) type BeefyPeer = Peer; +pub(crate) type BeefyPeer = Peer; #[derive(Debug, Serialize, Deserialize)] struct Genesis(std::collections::BTreeMap); @@ -98,19 +114,13 @@ fn beefy_protocol_name() { assert_eq!(proto_name.to_string(), expected); } -// TODO: compiler warns us about unused `signed_commitment_stream`, will use in later tests -#[allow(dead_code)] -#[derive(Clone)] -pub(crate) struct BeefyLinkHalf { - pub signed_commitment_stream: BeefySignedCommitmentStream, - pub beefy_best_block_stream: BeefyBestBlockStream, -} - #[derive(Default)] pub(crate) struct PeerData { - pub(crate) beefy_link_half: Mutex>, + pub(crate) beefy_rpc_links: Mutex>>, + pub(crate) beefy_voter_links: Mutex>>, } +#[derive(Default)] pub(crate) struct BeefyTestNet { peers: Vec, } @@ -163,20 +173,10 @@ impl BeefyTestNet { impl TestNetFactory for BeefyTestNet { type Verifier = PassThroughVerifier; - type BlockImport = PeersClient; + type BlockImport = BeefyBlockImport; type PeerData = PeerData; - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - BeefyTestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - _client: PeersClient, - _cfg: &ProtocolConfig, - _: &PeerData, - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { PassThroughVerifier::new(false) // use non-instant finality. } @@ -188,7 +188,17 @@ impl TestNetFactory for BeefyTestNet { Option>, Self::PeerData, ) { - (client.as_block_import(), None, PeerData::default()) + let inner = BlockImportAdapter::new(client.clone()); + let (block_import, voter_links, rpc_links) = beefy_block_import_and_links( + inner, + client.as_backend(), + Arc::new(two_validators::TestApi {}), + ); + let peer_data = PeerData { + beefy_rpc_links: Mutex::new(Some(rpc_links)), + beefy_voter_links: Mutex::new(Some(voter_links)), + }; + (BlockImportAdapter::new(block_import), None, peer_data) } fn peer(&mut self, i: usize) -> &mut BeefyPeer { @@ -316,7 +326,7 @@ fn add_auth_change_digest(header: &mut Header, new_auth_set: BeefyValidatorSet) } pub(crate) fn make_beefy_ids(keys: &[BeefyKeyring]) -> Vec { - keys.iter().map(|key| key.clone().public().into()).collect() + keys.iter().map(|&key| key.public().into()).collect() } pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> SyncCryptoStorePtr { @@ -343,12 +353,12 @@ where let keystore = create_beefy_keystore(*key); - let (signed_commitment_sender, signed_commitment_stream) = - BeefySignedCommitmentStream::::channel(); - let (beefy_best_block_sender, beefy_best_block_stream) = - BeefyBestBlockStream::::channel(); - let beefy_link_half = BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream }; - *peer.data.beefy_link_half.lock() = Some(beefy_link_half); + let (_, _, peer_data) = net.make_block_import(peer.client().clone()); + let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; + + let beefy_voter_links = beefy_voter_links.lock().take(); + *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); + *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); let beefy_params = crate::BeefyParams { client: peer.client().as_client(), @@ -356,8 +366,7 @@ where runtime: api.clone(), key_store: Some(keystore), network: 
peer.network_service().clone(), - signed_commitment_sender, - beefy_best_block_sender, + links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, protocol_name: BEEFY_PROTOCOL_NAME.into(), @@ -388,17 +397,18 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, peers: &[BeefyKeyring], -) -> (Vec>, Vec>>) { +) -> (Vec>, Vec>>) +{ let mut best_block_streams = Vec::new(); - let mut signed_commitment_streams = Vec::new(); + let mut versioned_finality_proof_streams = Vec::new(); for peer_id in 0..peers.len() { - let beefy_link_half = - net.peer(peer_id).data.beefy_link_half.lock().as_ref().unwrap().clone(); - let BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream } = beefy_link_half; - best_block_streams.push(beefy_best_block_stream.subscribe()); - signed_commitment_streams.push(signed_commitment_stream.subscribe()); + let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); + let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = + beefy_rpc_links; + best_block_streams.push(from_voter_best_beefy_stream.subscribe()); + versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); } - (best_block_streams, signed_commitment_streams) + (best_block_streams, versioned_finality_proof_streams) } fn wait_for_best_beefy_blocks( @@ -428,7 +438,7 @@ fn wait_for_best_beefy_blocks( } fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, runtime: &mut Runtime, expected_commitment_block_nums: &[u64], @@ -437,9 +447,12 @@ fn wait_for_beefy_signed_commitments( let len = expected_commitment_block_nums.len(); streams.into_iter().for_each(|stream| { let mut expected = expected_commitment_block_nums.iter(); - wait_for.push(Box::pin(stream.take(len).for_each(move |signed_commitment| { + wait_for.push(Box::pin(stream.take(len).for_each(move |versioned_finality_proof| { let expected = expected.next(); async move { + let signed_commitment = match versioned_finality_proof { + beefy_primitives::VersionedFinalityProof::V1(sc) => sc, + }; let commitment_block_num = signed_commitment.commitment.block_number; assert_eq!(expected, Some(commitment_block_num).as_ref()); // TODO: also verify commitment payload, validator set id, and signatures. 
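
The TODO above (also verifying signatures on received commitments) ultimately comes down to the supermajority rule exposed as `pub fn threshold` in the round.rs hunk earlier in this patch. Below is a standalone sketch of just that rule; the function body mirrors `round::threshold`, while the sample values are assumptions chosen for illustration:

/// Tolerating `f` faulty validators out of `n >= 3f + 1` requires at least
/// `n - f` valid signatures, i.e. strictly more than two thirds of the set.
fn threshold(authorities: usize) -> usize {
    let faulty = authorities.saturating_sub(1) / 3;
    authorities - faulty
}

fn main() {
    // (validator-set size, minimum valid signatures) -- illustrative pairs.
    for (n, expected) in [(1, 1), (3, 3), (4, 3), (7, 5), (10, 7), (100, 67)] {
        assert_eq!(threshold(n), expected);
    }
}

This is the same check `verify_with_validator_set` performs at the top of this patch: count the `Some(signature)` entries that verify against the validator set, then compare that count with `threshold(validator_set.len())`.
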
@@ -477,7 +490,7 @@ fn finalize_block_and_wait_for_beefy( finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); for block in finalize_targets { let finalize = BlockId::number(*block); @@ -490,11 +503,11 @@ fn finalize_block_and_wait_for_beefy( // run for quarter second then verify no new best beefy block available let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, runtime, None); } else { // run until expected beefy blocks are received wait_for_best_beefy_blocks(best_blocks, &net, runtime, expected_beefy); - wait_for_beefy_signed_commitments(signed_commitments, &net, runtime, expected_beefy); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, runtime, expected_beefy); } } @@ -565,19 +578,19 @@ fn lagging_validators() { // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[23, 24, 25]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); @@ -587,20 +600,20 @@ fn lagging_validators() { // validator set). 
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[60]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } #[test] @@ -638,8 +651,8 @@ fn correct_beefy_payload() { // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); - let (best_blocks, signed_commitments) = - get_beefy_streams(&mut *net.lock(), &[BeefyKeyring::Alice]); + let (best_blocks, versioned_finality_proof) = + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); // now 2 good validators and 1 bad one are voting net.lock() @@ -664,11 +677,11 @@ fn correct_beefy_payload() { // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // 3rd good validator catches up and votes as well - let (best_blocks, signed_commitments) = - get_beefy_streams(&mut *net.lock(), &[BeefyKeyring::Alice]); + let (best_blocks, versioned_finality_proof) = + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); net.lock() .peer(2) .client() @@ -678,5 +691,125 @@ fn correct_beefy_payload() { // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[11]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[11]); +} + +#[test] +fn beefy_importing_blocks() { + use futures::{executor::block_on, future::poll_fn, task::Poll}; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::BlockBackend; + + sp_tracing::try_init_simple(); + + let mut net = BeefyTestNet::new(2, 0); + + let client = net.peer(0).client().clone(); + let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); + let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; + let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; + + let params = |block: Block, justifications: Option| { + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + 
import.justifications = justifications; + import.body = Some(block.extrinsics); + import.finalized = true; + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + import + }; + + let full_client = client.as_client(); + let parent_id = BlockId::Number(0); + let block_id = BlockId::Number(1); + let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); + let block = builder.build().unwrap().block; + + // Import without justifications. + let mut justif_recv = justif_stream.subscribe(); + assert_eq!( + block_on(block_import.import_block(params(block.clone(), None), HashMap::new())).unwrap(), + ImportResult::Imported(ImportedAux { is_new_best: true, ..Default::default() }), + ); + assert_eq!( + block_on(block_import.import_block(params(block, None), HashMap::new())).unwrap(), + ImportResult::AlreadyInChain + ); + // Verify no justifications present: + { + // none in backend, + assert!(full_client.justifications(&block_id).unwrap().is_none()); + // and none sent to BEEFY worker. + block_on(poll_fn(move |cx| { + assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending); + Poll::Ready(()) + })); + } + + // Import with valid justification. + let parent_id = BlockId::Number(1); + let block_num = 2; + let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); + let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); + let encoded = versioned_proof.encode(); + let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); + + let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); + let block = builder.build().unwrap().block; + let mut justif_recv = justif_stream.subscribe(); + assert_eq!( + block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), + ImportResult::Imported(ImportedAux { + bad_justification: false, + is_new_best: true, + ..Default::default() + }), + ); + // Verify justification successfully imported: + { + // available in backend, + assert!(full_client.justifications(&BlockId::Number(block_num)).unwrap().is_some()); + // and also sent to BEEFY worker. + block_on(poll_fn(move |cx| { + match justif_recv.poll_next_unpin(cx) { + Poll::Ready(Some(_justification)) => (), + v => panic!("unexpected value: {:?}", v), + } + Poll::Ready(()) + })); + } + + // Import with invalid justification (incorrect validator set). + let parent_id = BlockId::Number(2); + let block_num = 3; + let keys = &[BeefyKeyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); + let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); + let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); + let encoded = versioned_proof.encode(); + let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); + + let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); + let block = builder.build().unwrap().block; + let mut justif_recv = justif_stream.subscribe(); + assert_eq!( + block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), + ImportResult::Imported(ImportedAux { + // Still `false` because we don't want to fail import on bad BEEFY justifications. 
+			bad_justification: false,
+			is_new_best: true,
+			..Default::default()
+		}),
+	);
+	// Verify the bad justification was not imported:
+	{
+		// none in backend,
+		assert!(full_client.justifications(&block_id).unwrap().is_none());
+		// and none sent to BEEFY worker.
+		block_on(poll_fn(move |cx| {
+			assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending);
+			Poll::Ready(())
+		}));
+	}
 }
diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs
index 735dea0170a62..9f1938fa91c33 100644
--- a/client/beefy/src/worker.rs
+++ b/client/beefy/src/worker.rs
@@ -17,11 +17,10 @@
 // along with this program. If not, see .
 
 use std::{
-	collections::{BTreeMap, BTreeSet},
+	collections::{BTreeMap, BTreeSet, VecDeque},
 	fmt::Debug,
 	marker::PhantomData,
 	sync::Arc,
-	time::Duration,
 };
 
 use codec::{Codec, Decode, Encode};
@@ -48,57 +47,174 @@ use beefy_primitives::{
 };
 
 use crate::{
-	error,
+	error::Error,
 	gossip::{topic, GossipValidator},
+	justification::BeefyVersionedFinalityProof,
 	keystore::BeefyKeystore,
 	metric_inc, metric_set,
 	metrics::Metrics,
-	notification::{BeefyBestBlockSender, BeefySignedCommitmentSender},
 	round::Rounds,
-	Client,
+	BeefyVoterLinks, Client,
 };
 
+enum RoundAction {
+	Drop,
+	Process,
+	Enqueue,
+}
+
+/// Responsible for the voting strategy.
+/// It chooses which incoming votes to accept and which votes to generate.
+struct VoterOracle {
+	/// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session.
+	///
+	/// There are three voter states corresponding to three queue states:
+	/// 1. voter uninitialized: queue empty,
+	/// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized:
+	///    queue has ONE element, the 'current session' where `mandatory_done == true`,
+	/// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`.
+	///    In this state, every time a session gets its mandatory block BEEFY finalized, it's
+	///    popped off the queue, eventually getting to state `2. up-to-date`.
+	sessions: VecDeque>,
+	/// Min delta in block numbers between two blocks BEEFY should vote on.
+	min_block_delta: u32,
+}
+
+impl VoterOracle {
+	pub fn new(min_block_delta: u32) -> Self {
+		Self {
+			sessions: VecDeque::new(),
+			// Always target at least one block better than current best beefy.
+			min_block_delta: min_block_delta.max(1),
+		}
+	}
+
+	/// Return mutable reference to rounds pertaining to first session in the queue.
+	/// Voting will always happen at the head of the queue.
+	pub fn rounds_mut(&mut self) -> Option<&mut Rounds> {
+		self.sessions.front_mut()
+	}
+
+	/// Add new observed session to the Oracle.
+	pub fn add_session(&mut self, rounds: Rounds) {
+		self.sessions.push_back(rounds);
+		self.try_prune();
+	}
+
+	/// Prune the queue to keep the Oracle in one of the expected three states.
+	///
+	/// Call this function on each BEEFY finality,
+	/// or at the very least on each BEEFY mandatory block finality.
+	pub fn try_prune(&mut self) {
+		if self.sessions.len() > 1 {
+			// when there are multiple sessions, only keep the `!mandatory_done()` ones.
+			self.sessions.retain(|s| !s.mandatory_done())
+		}
+	}
+
+	/// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept.
+	pub fn accepted_interval(
+		&self,
+		best_grandpa: NumberFor,
+	) -> Result<(NumberFor, NumberFor), Error> {
+		let rounds = self.sessions.front().ok_or(Error::UninitSession)?;
+
+		if rounds.mandatory_done() {
+			// There's only one session active and its mandatory is done.
+ // Accept any GRANDPA finalized vote. + Ok((rounds.session_start(), best_grandpa.into())) + } else { + // There's at least one session with mandatory not done. + // Only accept votes for the mandatory block in the front of queue. + Ok((rounds.session_start(), rounds.session_start())) + } + } + + /// Utility function to quickly decide what to do for each round. + pub fn triage_round( + &self, + round: NumberFor, + best_grandpa: NumberFor, + ) -> Result { + let (start, end) = self.accepted_interval(best_grandpa)?; + if start <= round && round <= end { + Ok(RoundAction::Process) + } else if round > end { + Ok(RoundAction::Enqueue) + } else { + Ok(RoundAction::Drop) + } + } + + /// Return `Some(number)` if we should be voting on block `number`, + /// return `None` if there is no block we should vote on. + pub fn voting_target( + &self, + best_beefy: Option>, + best_grandpa: NumberFor, + ) -> Option> { + let rounds = if let Some(r) = self.sessions.front() { + r + } else { + debug!(target: "beefy", "🥩 No voting round started"); + return None + }; + + // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. + let target = + vote_target(best_grandpa, best_beefy, rounds.session_start(), self.min_block_delta); + trace!( + target: "beefy", + "🥩 best beefy: #{:?}, best finalized: #{:?}, current_vote_target: {:?}", + best_beefy, + best_grandpa, + target + ); + target + } +} + pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, pub runtime: Arc, + pub sync_oracle: SO, pub key_store: BeefyKeystore, - pub signed_commitment_sender: BeefySignedCommitmentSender, - pub beefy_best_block_sender: BeefyBestBlockSender, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, - pub min_block_delta: u32, + pub links: BeefyVoterLinks, pub metrics: Option, - pub sync_oracle: SO, + pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol pub(crate) struct BeefyWorker { + // utilities client: Arc, backend: Arc, runtime: Arc, + sync_oracle: SO, key_store: BeefyKeystore, - signed_commitment_sender: BeefySignedCommitmentSender, gossip_engine: GossipEngine, gossip_validator: Arc>, - /// Min delta in block numbers between two blocks, BEEFY should vote on - min_block_delta: u32, + + // channels + /// Links between the block importer, the background voter and the RPC layer. + links: BeefyVoterLinks, + + // voter state + /// BEEFY client metrics. metrics: Option, - rounds: Option>, - /// Buffer holding votes for blocks that the client hasn't seen finality for. - pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, - /// Best block we received a GRANDPA notification for + /// Best block we received a GRANDPA finality for. best_grandpa_block_header: ::Header, - /// Best block a BEEFY voting round has been concluded for + /// Best block a BEEFY voting round has been concluded for. best_beefy_block: Option>, - /// Used to keep RPC worker up to date on latest/best beefy - beefy_best_block_sender: BeefyBestBlockSender, - /// Validator set id for the last signed commitment - last_signed_id: u64, - /// Handle to the sync oracle - sync_oracle: SO, - // keep rustc happy - _backend: PhantomData, + /// Buffer holding votes for future processing. + pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, + /// Buffer holding justifications for future processing. + pending_justifications: BTreeMap, Vec>>, + /// Chooses which incoming votes to accept and which votes to generate. 
+ voting_oracle: VoterOracle, } impl BeefyWorker @@ -122,13 +238,12 @@ where backend, runtime, key_store, - signed_commitment_sender, - beefy_best_block_sender, + sync_oracle, gossip_engine, gossip_validator, - min_block_delta, + links, metrics, - sync_oracle, + min_block_delta, } = worker_params; let last_finalized_header = client @@ -139,53 +254,29 @@ where client: client.clone(), backend, runtime, + sync_oracle, key_store, - signed_commitment_sender, gossip_engine, gossip_validator, - // always target at least one block better than current best beefy - min_block_delta: min_block_delta.max(1), + links, metrics, - rounds: None, - pending_votes: BTreeMap::new(), best_grandpa_block_header: last_finalized_header, best_beefy_block: None, - last_signed_id: 0, - beefy_best_block_sender, - sync_oracle, - _backend: PhantomData, + pending_votes: BTreeMap::new(), + pending_justifications: BTreeMap::new(), + voting_oracle: VoterOracle::new(min_block_delta), } } - /// Return `Some(number)` if we should be voting on block `number` now, - /// return `None` if there is no block we should vote on now. - fn current_vote_target(&self) -> Option> { - let rounds = if let Some(r) = &self.rounds { - r - } else { - debug!(target: "beefy", "🥩 No voting round started"); - return None - }; - - let best_finalized = *self.best_grandpa_block_header.number(); - // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. - let target = vote_target( - best_finalized, - self.best_beefy_block, - *rounds.session_start(), - self.min_block_delta, - ); - trace!( - target: "beefy", - "🥩 best beefy: #{:?}, best finalized: #{:?}, current_vote_target: {:?}", - self.best_beefy_block, - best_finalized, - target - ); - if let Some(target) = &target { - metric_set!(self, beefy_should_vote_on, target); - } - target + /// Simple wrapper that gets MMR root from header digests or from client state. + fn get_mmr_root_digest(&self, header: &B::Header) -> Option { + find_mmr_root_digest::(header).or_else(|| { + self.runtime + .runtime_api() + .mmr_root(&BlockId::hash(header.hash())) + .ok() + .and_then(|r| r.ok()) + }) } /// Verify `active` validator set for `block` against the key store @@ -200,7 +291,7 @@ where &self, block: &NumberFor, active: &ValidatorSet, - ) -> Result<(), error::Error> { + ) -> Result<(), Error> { let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); let public_keys = self.key_store.public_keys()?; @@ -209,121 +300,103 @@ where if store.intersection(&active).count() == 0 { let msg = "no authority public key found in store".to_string(); debug!(target: "beefy", "🥩 for block {:?} {}", block, msg); - Err(error::Error::Keystore(msg)) + Err(Error::Keystore(msg)) } else { Ok(()) } } - /// Set best BEEFY block to `block_num`. - /// - /// Also sends/updates the best BEEFY block hash to the RPC worker. - fn set_best_beefy_block(&mut self, block_num: NumberFor) { - if Some(block_num) > self.best_beefy_block { - // Try to get block hash ourselves. - let block_hash = match self.client.hash(block_num) { - Ok(h) => h, - Err(e) => { - error!(target: "beefy", "🥩 Failed to get hash for block number {}: {}", - block_num, e); - None - }, - }; - // Update RPC worker with new best BEEFY block hash. - block_hash.map(|hash| { - self.beefy_best_block_sender - .notify(|| Ok::<_, ()>(hash)) - .expect("forwards closure result; the closure always returns Ok; qed.") - }); - // Set new best BEEFY block number. 
- self.best_beefy_block = Some(block_num); - metric_set!(self, beefy_best_block, block_num); - } else { - debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); - } - } - /// Handle session changes by starting new voting round for mandatory blocks. fn init_session_at( &mut self, - active: ValidatorSet, + validator_set: ValidatorSet, new_session_start: NumberFor, ) { - debug!(target: "beefy", "🥩 New active validator set: {:?}", active); - metric_set!(self, beefy_validator_set_id, active.id()); - // BEEFY should produce a signed commitment for each session - if active.id() != self.last_signed_id + 1 && - active.id() != GENESIS_AUTHORITY_SET_ID && - self.last_signed_id != 0 - { - debug!( - target: "beefy", "🥩 Detected skipped session: active-id {:?}, last-signed-id {:?}", - active.id(), - self.last_signed_id, - ); - metric_inc!(self, beefy_skipped_sessions); + debug!(target: "beefy", "🥩 New active validator set: {:?}", validator_set); + metric_set!(self, beefy_validator_set_id, validator_set.id()); + + // BEEFY should produce the mandatory block of each session. + if let Some(active_session) = self.voting_oracle.rounds_mut() { + if !active_session.mandatory_done() { + debug!( + target: "beefy", "🥩 New session {} while active session {} is still lagging.", + validator_set.id(), + active_session.validator_set_id(), + ); + metric_inc!(self, beefy_lagging_sessions); + } } if log_enabled!(target: "beefy", log::Level::Debug) { // verify the new validator set - only do it if we're also logging the warning - let _ = self.verify_validator_set(&new_session_start, &active); + let _ = self.verify_validator_set(&new_session_start, &validator_set); } - let id = active.id(); - self.rounds = Some(Rounds::new(new_session_start, active)); + let id = validator_set.id(); + self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, new_session_start); } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { debug!(target: "beefy", "🥩 Finality notification: {:?}", notification); - let number = *notification.header.number(); - - // On start-up ignore old finality notifications that we're not interested in. - if number <= *self.best_grandpa_block_header.number() { - debug!(target: "beefy", "🥩 Got unexpected finality for old block #{:?}", number); - return - } + let header = ¬ification.header; - // update best GRANDPA finalized block we have seen - self.best_grandpa_block_header = notification.header.clone(); + if *header.number() > *self.best_grandpa_block_header.number() { + // update best GRANDPA finalized block we have seen + self.best_grandpa_block_header = header.clone(); - self.handle_finality(¬ification.header); - } - - fn handle_finality(&mut self, header: &B::Header) { - // Check for and handle potential new session. - if let Some(new_validator_set) = find_authorities_change::(header) { - self.init_session_at(new_validator_set, *header.number()); + // Check for and enqueue potential new session. + if let Some(new_validator_set) = find_authorities_change::(header) { + self.init_session_at(new_validator_set, *header.number()); + // TODO: when adding SYNC protocol, fire up a request for justification for this + // mandatory block here. + } } + } - // Handle any pending votes for now finalized blocks. - self.check_pending_votes(); - - // Vote if there's now a new vote target. 
- if let Some(target_number) = self.current_vote_target() { - self.do_vote(target_number); - } + /// Based on [VoterOracle] this vote is either processed here or enqueued for later. + fn triage_incoming_vote( + &mut self, + vote: VoteMessage, AuthorityId, Signature>, + ) -> Result<(), Error> { + let block_num = vote.commitment.block_number; + let best_grandpa = *self.best_grandpa_block_header.number(); + match self.voting_oracle.triage_round(block_num, best_grandpa)? { + RoundAction::Process => self.handle_vote( + (vote.commitment.payload, vote.commitment.block_number), + (vote.id, vote.signature), + false, + )?, + RoundAction::Enqueue => { + debug!(target: "beefy", "🥩 Buffer vote for round: {:?}.", block_num); + self.pending_votes.entry(block_num).or_default().push(vote) + }, + RoundAction::Drop => (), + }; + Ok(()) } - // Handles all buffered votes for now finalized blocks. - fn check_pending_votes(&mut self) { - let not_finalized = self.best_grandpa_block_header.number().saturating_add(1u32.into()); - let still_pending = self.pending_votes.split_off(¬_finalized); - let votes_to_handle = std::mem::replace(&mut self.pending_votes, still_pending); - for (num, votes) in votes_to_handle.into_iter() { - if Some(num) > self.best_beefy_block { - debug!(target: "beefy", "🥩 Handling buffered votes for now GRANDPA finalized block: {:?}.", num); - for v in votes.into_iter() { - self.handle_vote( - (v.commitment.payload, v.commitment.block_number), - (v.id, v.signature), - false, - ); - } - } else { - debug!(target: "beefy", "🥩 Dropping outdated buffered votes for now BEEFY finalized block: {:?}.", num); - } - } + /// Based on [VoterOracle] this justification is either processed here or enqueued for later. + /// + /// Expects `justification` to be valid. + fn triage_incoming_justif( + &mut self, + justification: BeefyVersionedFinalityProof, + ) -> Result<(), Error> { + let signed_commitment = match justification { + VersionedFinalityProof::V1(ref sc) => sc, + }; + let block_num = signed_commitment.commitment.block_number; + let best_grandpa = *self.best_grandpa_block_header.number(); + match self.voting_oracle.triage_round(block_num, best_grandpa)? 
{
+			RoundAction::Process => self.finalize(justification),
+			RoundAction::Enqueue => {
+				debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num);
+				self.pending_justifications.entry(block_num).or_default().push(justification)
+			},
+			RoundAction::Drop => (),
+		};
+		Ok(())
+	}
 
 	fn handle_vote(
@@ -331,63 +404,146 @@ where
 		round: (Payload, NumberFor),
 		vote: (AuthorityId, Signature),
 		self_vote: bool,
-	) {
+	) -> Result<(), Error> {
 		self.gossip_validator.note_round(round.1);
 
-		let rounds = if let Some(rounds) = self.rounds.as_mut() {
-			rounds
-		} else {
-			debug!(target: "beefy", "🥩 Missing validator set - can't handle vote {:?}", vote);
-			return
-		};
+		let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?;
 
 		if rounds.add_vote(&round, vote, self_vote) {
 			if let Some(signatures) = rounds.try_conclude(&round) {
 				self.gossip_validator.conclude_round(round.1);
 
-				// id is stored for skipped session metric calculation
-				self.last_signed_id = rounds.validator_set_id();
-
 				let block_num = round.1;
 				let commitment = Commitment {
 					payload: round.0,
 					block_number: block_num,
-					validator_set_id: self.last_signed_id,
+					validator_set_id: rounds.validator_set_id(),
 				};
 
-				let signed_commitment = SignedCommitment { commitment, signatures };
+				let finality_proof =
+					VersionedFinalityProof::V1(SignedCommitment { commitment, signatures });
 
 				metric_set!(self, beefy_round_concluded, block_num);
 
-				info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment);
+				info!(target: "beefy", "🥩 Round #{} concluded, finality_proof: {:?}.", round.1, finality_proof);
 
 				if let Err(e) = self.backend.append_justification(
 					BlockId::Number(block_num),
-					(
-						BEEFY_ENGINE_ID,
-						VersionedFinalityProof::V1(signed_commitment.clone()).encode(),
-					),
+					(BEEFY_ENGINE_ID, finality_proof.clone().encode()),
 				) {
-					debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, signed_commitment);
+					debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof);
 				}
 
-				self.signed_commitment_sender
-					.notify(|| Ok::<_, ()>(signed_commitment))
-					.expect("forwards closure result; the closure always returns Ok; qed.");
-				self.set_best_beefy_block(block_num);
+				// We created the `finality_proof` and know it to be valid.
+				self.finalize(finality_proof);
+			}
+		}
+		Ok(())
+	}
+
+	/// Provide BEEFY finality for block based on `finality_proof`:
+	/// 1. Prune irrelevant past sessions from the oracle,
+	/// 2. Set BEEFY best block,
+	/// 3. Send best block hash and `finality_proof` to RPC worker.
+	///
+	/// Expects `finality_proof` to be valid.
+	fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) {
+		// Prune any now "finalized" sessions from queue.
+		self.voting_oracle.try_prune();
+		let signed_commitment = match finality_proof {
+			VersionedFinalityProof::V1(ref sc) => sc,
+		};
+		let block_num = signed_commitment.commitment.block_number;
+		if Some(block_num) > self.best_beefy_block {
+			// Set new best BEEFY block number.
+			self.best_beefy_block = Some(block_num);
+			metric_set!(self, beefy_best_block, block_num);
+
+			self.client.hash(block_num).ok().flatten().map(|hash| {
+				self.links
+					.to_rpc_best_block_sender
+					.notify(|| Ok::<_, ()>(hash))
+					.expect("forwards closure result; the closure always returns Ok; qed.")
+			});
+
+			self.links
+				.to_rpc_justif_sender
+				.notify(|| Ok::<_, ()>(finality_proof))
+				.expect("forwards closure result; the closure always returns Ok; qed.");
+		} else {
+			debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num);
+		}
+	}
+
+	/// Handle previously buffered justifications and votes that now land in the voting interval.
+	fn try_pending_justif_and_votes(&mut self) -> Result<(), Error> {
+		let best_grandpa = *self.best_grandpa_block_header.number();
+		let _ph = PhantomData::::default();
+
+		fn to_process_for(
+			pending: &mut BTreeMap, Vec>,
+			(start, end): (NumberFor, NumberFor),
+			_: PhantomData,
+		) -> BTreeMap, Vec> {
+			// These are still pending.
+			let still_pending = pending.split_off(&end.saturating_add(1u32.into()));
+			// These can be processed.
+			let to_handle = pending.split_off(&start);
+			// The rest can be dropped.
+			*pending = still_pending;
+			// Return ones to process.
+			to_handle
+		}
 
-		// Vote if there's now a new vote target.
-		if let Some(target_number) = self.current_vote_target() {
-			self.do_vote(target_number);
+		// Process pending justifications.
+		let interval = self.voting_oracle.accepted_interval(best_grandpa)?;
+		if !self.pending_justifications.is_empty() {
+			let justifs_to_handle = to_process_for(&mut self.pending_justifications, interval, _ph);
+			for (num, justifications) in justifs_to_handle.into_iter() {
+				debug!(target: "beefy", "🥩 Handle buffered justifications for: {:?}.", num);
+				for justif in justifications.into_iter() {
+					self.finalize(justif);
+				}
+			}
+		}
+
+		// Process pending votes.
+		let interval = self.voting_oracle.accepted_interval(best_grandpa)?;
+		if !self.pending_votes.is_empty() {
+			let votes_to_handle = to_process_for(&mut self.pending_votes, interval, _ph);
+			for (num, votes) in votes_to_handle.into_iter() {
+				debug!(target: "beefy", "🥩 Handle buffered votes for: {:?}.", num);
+				for v in votes.into_iter() {
+					if let Err(err) = self.handle_vote(
+						(v.commitment.payload, v.commitment.block_number),
+						(v.id, v.signature),
+						false,
+					) {
+						error!(target: "beefy", "🥩 Error handling buffered vote: {}", err);
+					};
 				}
 			}
 		}
+		Ok(())
+	}
+
+	/// Decide whether we should vote, then vote, or don't.
+	fn try_to_vote(&mut self) -> Result<(), Error> {
+		// Vote if there's now a new vote target.
+		if let Some(target) = self
+			.voting_oracle
+			.voting_target(self.best_beefy_block, *self.best_grandpa_block_header.number())
+		{
+			metric_set!(self, beefy_should_vote_on, target);
+			self.do_vote(target)?;
+		}
+		Ok(())
+	}
 
 	/// Create and gossip Signed Commitment for block number `target_number`.
 	///
 	/// Also handle this self vote by calling `self.handle_vote()` for it.
- fn do_vote(&mut self, target_number: NumberFor) { + fn do_vote(&mut self, target_number: NumberFor) -> Result<(), Error> { debug!(target: "beefy", "🥩 Try voting on {}", target_number); // Most of the time we get here, `target` is actually `best_grandpa`, @@ -395,18 +551,13 @@ where let target_header = if target_number == *self.best_grandpa_block_header.number() { self.best_grandpa_block_header.clone() } else { - match self.client.expect_header(BlockId::Number(target_number)) { - Ok(h) => h, - Err(err) => { - debug!( - target: "beefy", - "🥩 Could not get header for block #{:?} (error: {:?}), skipping vote..", - target_number, - err - ); - return - }, - } + self.client.expect_header(BlockId::Number(target_number)).map_err(|err| { + let err_msg = format!( + "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", + target_number, err + ); + Error::Backend(err_msg) + })? }; let target_hash = target_header.hash(); @@ -414,36 +565,33 @@ where hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); - return + return Ok(()) }; let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, mmr_root.encode()); - let (validators, validator_set_id) = if let Some(rounds) = &self.rounds { - if !rounds.should_self_vote(&(payload.clone(), target_number)) { - debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); - return - } - (rounds.validators(), rounds.validator_set_id()) - } else { - debug!(target: "beefy", "🥩 Missing validator set - can't vote for: {:?}", target_hash); - return - }; + let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; + if !rounds.should_self_vote(&(payload.clone(), target_number)) { + debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); + return Ok(()) + } + let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); + let authority_id = if let Some(id) = self.key_store.authority_id(validators) { debug!(target: "beefy", "🥩 Local authority id: {:?}", id); id } else { debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", target_hash); - return + return Ok(()) }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); - let signature = match self.key_store.sign(&authority_id, &*encoded_commitment) { + let signature = match self.key_store.sign(&authority_id, &encoded_commitment) { Ok(sig) => sig, Err(err) => { warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); - return + return Ok(()) }, }; @@ -451,7 +599,7 @@ where target: "beefy", "🥩 Produced signature using {:?}, is_valid: {:?}", authority_id, - BeefyKeystore::verify(&authority_id, &signature, &*encoded_commitment) + BeefyKeystore::verify(&authority_id, &signature, &encoded_commitment) ); let message = VoteMessage { commitment, id: authority_id, signature }; @@ -462,13 +610,17 @@ where debug!(target: "beefy", "🥩 Sent vote message: {:?}", message); - self.handle_vote( + if let Err(err) = self.handle_vote( (message.commitment.payload, message.commitment.block_number), (message.id, message.signature), true, - ); + ) { + error!(target: "beefy", "🥩 Error handling self vote: {}", err); + } self.gossip_engine.gossip_message(topic::(), encoded_message, false); + + Ok(()) } /// Wait for BEEFY runtime pallet to be available. @@ -494,6 +646,9 @@ where // Once we'll implement 'initial sync' (catch-up), the worker will be able to // start voting right away. 
self.handle_finality_notification(¬if); + if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); + } break } else { trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); @@ -529,15 +684,14 @@ where }) .fuse(), ); + let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); loop { - while self.sync_oracle.is_major_syncing() { - debug!(target: "beefy", "Waiting for major sync to complete..."); - futures_timer::Delay::new(Duration::from_secs(5)).await; - } - let mut gossip_engine = &mut self.gossip_engine; - futures::select! { + // Wait for, and handle external events. + // The branches below only change 'state', actual voting happen afterwards, + // based on the new resulting 'state'. + futures::select_biased! { notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); @@ -545,24 +699,24 @@ where return; } }, + // TODO: when adding SYNC protocol, join the on-demand justifications stream to + // this one, and handle them both here. + justif = block_import_justif.next() => { + if let Some(justif) = justif { + // Block import justifications have already been verified to be valid + // by `BeefyBlockImport`. + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: "beefy", "🥩 {}", err); + } + } else { + return; + } + }, vote = votes.next() => { if let Some(vote) = vote { - let block_num = vote.commitment.block_number; - if block_num > *self.best_grandpa_block_header.number() { - // Only handle votes for blocks we _know_ have been finalized. - // Buffer vote to be handled later. - debug!( - target: "beefy", - "🥩 Buffering vote for not (yet) finalized block: {:?}.", - block_num - ); - self.pending_votes.entry(block_num).or_default().push(vote); - } else { - self.handle_vote( - (vote.commitment.payload, vote.commitment.block_number), - (vote.id, vote.signature), - false - ); + // Votes have already been verified to be valid by the gossip validator. + if let Err(err) = self.triage_incoming_vote(vote) { + debug!(target: "beefy", "🥩 {}", err); } } else { return; @@ -573,18 +727,20 @@ where return; } } - } - } - /// Simple wrapper that gets MMR root from header digests or from client state. - fn get_mmr_root_digest(&self, header: &B::Header) -> Option { - find_mmr_root_digest::(header).or_else(|| { - self.runtime - .runtime_api() - .mmr_root(&BlockId::hash(header.hash())) - .ok() - .and_then(|r| r.ok()) - }) + // Don't bother acting on 'state' changes during major sync. + if !self.sync_oracle.is_major_syncing() { + // Handle pending justifications and/or votes for now GRANDPA finalized blocks. + if let Err(err) = self.try_pending_justif_and_votes() { + debug!(target: "beefy", "🥩 {}", err); + } + + // There were external events, 'state' is changed, author a vote if needed/possible. 
+ if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); + } + } + } } } @@ -679,16 +835,16 @@ pub(crate) mod tests { use super::*; use crate::{ keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, }, + BeefyRPCLinks, }; use futures::{executor::block_on, future::poll_fn, task::Poll}; - use crate::tests::BeefyLinkHalf; use sc_client_api::HeaderBackend; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; @@ -705,12 +861,22 @@ pub(crate) mod tests { ) -> BeefyWorker>> { let keystore = create_beefy_keystore(*key); - let (signed_commitment_sender, signed_commitment_stream) = - BeefySignedCommitmentStream::::channel(); - let (beefy_best_block_sender, beefy_best_block_stream) = + let (to_rpc_justif_sender, from_voter_justif_stream) = + BeefyVersionedFinalityProofStream::::channel(); + let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); - let beefy_link_half = BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream }; - *peer.data.beefy_link_half.lock() = Some(beefy_link_half); + let (_, from_block_import_justif_stream) = + BeefyVersionedFinalityProofStream::::channel(); + + let beefy_rpc_links = + BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; + *peer.data.beefy_rpc_links.lock() = Some(beefy_rpc_links); + + let links = BeefyVoterLinks { + from_block_import_justif_stream, + to_rpc_justif_sender, + to_rpc_best_block_sender, + }; let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); @@ -723,8 +889,7 @@ pub(crate) mod tests { backend: peer.client().as_backend(), runtime: api, key_store: Some(keystore).into(), - signed_commitment_sender, - beefy_best_block_sender, + links, gossip_engine, gossip_validator, min_block_delta, @@ -826,6 +991,107 @@ pub(crate) mod tests { assert_eq!(Some(1072), t); } + #[test] + fn should_vote_target() { + let mut oracle = VoterOracle::::new(1); + + // rounds not initialized -> should vote: `None` + assert_eq!(oracle.voting_target(None, 1), None); + + let keys = &[Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + + oracle.add_session(Rounds::new(1, validator_set.clone())); + + // under min delta + oracle.min_block_delta = 4; + assert_eq!(oracle.voting_target(Some(1), 1), None); + assert_eq!(oracle.voting_target(Some(2), 5), None); + + // vote on min delta + assert_eq!(oracle.voting_target(Some(4), 9), Some(8)); + oracle.min_block_delta = 8; + assert_eq!(oracle.voting_target(Some(10), 18), Some(18)); + + // vote on power of two + oracle.min_block_delta = 1; + assert_eq!(oracle.voting_target(Some(1000), 1008), Some(1004)); + assert_eq!(oracle.voting_target(Some(1000), 1016), Some(1008)); + + // nothing new to vote on + assert_eq!(oracle.voting_target(Some(1000), 1000), None); + + // vote on mandatory + oracle.sessions.clear(); + oracle.add_session(Rounds::new(1000, validator_set.clone())); + assert_eq!(oracle.voting_target(None, 1008), Some(1000)); + oracle.sessions.clear(); + oracle.add_session(Rounds::new(1001, validator_set.clone())); + assert_eq!(oracle.voting_target(Some(1000), 1008), Some(1001)); + } + + #[test] + fn test_oracle_accepted_interval() { + let keys = &[Keyring::Alice]; + let validator_set = 
ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + + let mut oracle = VoterOracle::<Block>::new(1); + + // rounds not initialized -> should not accept any votes yet + assert!(oracle.accepted_interval(1).is_err()); + + let session_one = 1; + oracle.add_session(Rounds::new(session_one, validator_set.clone())); + // mandatory not done, only accept mandatory + for i in 0..15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one))); + } + + // add more sessions, nothing changes + let session_two = 11; + let session_three = 21; + oracle.add_session(Rounds::new(session_two, validator_set.clone())); + oracle.add_session(Rounds::new(session_three, validator_set.clone())); + // mandatory not done, should accept mandatory for session_one + for i in session_three..session_three + 15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one))); + } + + // simulate finish mandatory for session one, prune oracle + oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true); + oracle.try_prune(); + // session_one pruned, should accept mandatory for session_two + for i in session_three..session_three + 15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_two, session_two))); + } + + // simulate finish mandatory for session two, prune oracle + oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true); + oracle.try_prune(); + // session_two pruned, should accept mandatory for session_three + for i in session_three..session_three + 15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_three, session_three))); + } + + // simulate finish mandatory for session three + oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true); + // verify all other blocks in this session are now open to voting + for i in session_three..session_three + 15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_three, i))); + } + // pruning does nothing in this case + oracle.try_prune(); + for i in session_three..session_three + 15 { + assert_eq!(oracle.accepted_interval(i), Ok((session_three, i))); + } + + // adding new session automatically prunes "finalized" previous session + let session_four = 31; + oracle.add_session(Rounds::new(session_four, validator_set.clone())); + assert_eq!(oracle.sessions.front().unwrap().session_start(), session_four); + assert_eq!(oracle.accepted_interval(session_four + 10), Ok((session_four, session_four))); + } + #[test] fn extract_authorities_change_digest() { let mut header = Header::new( @@ -868,7 +1134,7 @@ let mmr_root_hash = H256::random(); header.digest_mut().push(DigestItem::Consensus( BEEFY_ENGINE_ID, - ConsensusLog::<AuthorityId>::MmrRoot(mmr_root_hash.clone()).encode(), + ConsensusLog::<AuthorityId>::MmrRoot(mmr_root_hash).encode(), )); // verify validator set is correctly extracted from digest @@ -876,69 +1142,6 @@ assert_eq!(extracted, Some(mmr_root_hash)); } - #[test] - fn should_vote_target() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - - // rounds not initialized -> should vote: `None` - assert_eq!(worker.current_vote_target(), None); - - let set_up = |worker: &mut BeefyWorker< - Block, - Backend, - PeersFullClient, - TestApi, - Arc<NetworkService<Block, H256>>, - >, - best_grandpa: u64, - best_beefy: Option<u64>, - session_start: u64, - min_delta: u32| { - let grandpa_header = Header::new( - best_grandpa, - Default::default(), - Default::default(), -
Default::default(), - Default::default(), - ); - worker.best_grandpa_block_header = grandpa_header; - worker.best_beefy_block = best_beefy; - worker.min_block_delta = min_delta; - worker.rounds = Some(Rounds::new(session_start, validator_set.clone())); - }; - - // under min delta - set_up(&mut worker, 1, Some(1), 1, 4); - assert_eq!(worker.current_vote_target(), None); - set_up(&mut worker, 5, Some(2), 1, 4); - assert_eq!(worker.current_vote_target(), None); - - // vote on min delta - set_up(&mut worker, 9, Some(4), 1, 4); - assert_eq!(worker.current_vote_target(), Some(8)); - set_up(&mut worker, 18, Some(10), 1, 8); - assert_eq!(worker.current_vote_target(), Some(18)); - - // vote on power of two - set_up(&mut worker, 1008, Some(1000), 1, 1); - assert_eq!(worker.current_vote_target(), Some(1004)); - set_up(&mut worker, 1016, Some(1000), 1, 2); - assert_eq!(worker.current_vote_target(), Some(1008)); - - // nothing new to vote on - set_up(&mut worker, 1000, Some(1000), 1, 1); - assert_eq!(worker.current_vote_target(), None); - - // vote on mandatory - set_up(&mut worker, 1008, None, 1000, 8); - assert_eq!(worker.current_vote_target(), Some(1000)); - set_up(&mut worker, 1008, Some(1000), 1001, 8); - assert_eq!(worker.current_vote_target(), Some(1001)); - } - #[test] fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; @@ -953,39 +1156,57 @@ pub(crate) mod tests { let keys = &[Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let err_msg = "no authority public key found in store".to_string(); - let expected = Err(error::Error::Keystore(err_msg)); + let expected = Err(Error::Keystore(err_msg)); assert_eq!(worker.verify_validator_set(&1, &validator_set), expected); // worker has no keystore worker.key_store = None.into(); - let expected_err = Err(error::Error::Keystore("no Keystore".into())); + let expected_err = Err(Error::Keystore("no Keystore".into())); assert_eq!(worker.verify_validator_set(&1, &validator_set), expected_err); } #[test] - fn setting_best_beefy_block() { + fn test_finalize() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); + let mut finality_proof = finality_proofs.drain(..).next().unwrap(); - // no 'best beefy block' + let create_finality_proof = |block_num: NumberFor<Block>| { + let commitment = Commitment { + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + block_number: block_num, + validator_set_id: validator_set.id(), + }; + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures: vec![None] }) + }; + + // no 'best beefy block' or finality proofs assert_eq!(worker.best_beefy_block, None); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending); Poll::Ready(()) })); // unknown hash for block #1 - let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - worker.set_best_beefy_block(1); + let mut finality_proof = 
finality_proofs.drain(..).next().unwrap(); + let justif = create_finality_proof(1); + worker.finalize(justif.clone()); assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + match finality_proof.poll_next_unpin(cx) { + // expect justification + Poll::Ready(Some(received)) => assert_eq!(received, justif), + v => panic!("unexpected value: {:?}", v), + } Poll::Ready(()) })); @@ -994,7 +1215,8 @@ pub(crate) mod tests { let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); net.generate_blocks(2, 10, &validator_set, false); - worker.set_best_beefy_block(2); + let justif = create_finality_proof(2); + worker.finalize(justif); assert_eq!(worker.best_beefy_block, Some(2)); block_on(poll_fn(move |cx| { match best_block_stream.poll_next_unpin(cx) { @@ -1010,20 +1232,17 @@ pub(crate) mod tests { } #[test] - fn setting_initial_session() { + fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - assert!(worker.rounds.is_none()); + assert!(worker.voting_oracle.sessions.is_empty()); - // verify setting the correct validator sets and boundary for genesis session worker.init_session_at(validator_set.clone(), 1); - - let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.session_start(), &1); - // in genesis case both current and prev validator sets are the same + let worker_rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(worker_rounds.session_start(), 1); assert_eq!(worker_rounds.validators(), validator_set.validators()); assert_eq!(worker_rounds.validator_set_id(), validator_set.id()); @@ -1031,12 +1250,79 @@ pub(crate) mod tests { let keys = &[Keyring::Bob]; let new_validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - // verify setting the correct validator sets and boundary for non-genesis session worker.init_session_at(new_validator_set.clone(), 11); + // Since mandatory is not done for old rounds, we still get those. + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + // Let's finalize mandatory. + rounds.test_set_mandatory_done(true); + worker.voting_oracle.try_prune(); + // Now we should get the next round. + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + // Expect new values. 
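The oracle tests above (`should_vote_target` in particular) pin down the voting-target rule. As a reading aid, here is a standalone sketch that is consistent with all of those cases (the function and concrete integer types are illustrative, not the crate's API): vote the session's mandatory block first; otherwise advance past the last BEEFY-finalized block by a power-of-two step of at least `min_block_delta`, and never past GRANDPA finality.

```rust
fn vote_target(best_grandpa: u64, best_beefy: u64, session_start: u64, min_delta: u32) -> Option<u64> {
    // The first block of a session is mandatory: it carries the validator
    // set handoff and must be finalized before anything after it.
    if session_start > best_beefy {
        return Some(session_start)
    }
    // Step size grows with the gap to GRANDPA but never drops below `min_delta`.
    let step = (best_grandpa.saturating_sub(best_beefy) / 2).next_power_of_two();
    let target = best_beefy + u64::from(min_delta).max(step);
    if target <= best_grandpa {
        Some(target)
    } else {
        None // nothing GRANDPA-finalized left to vote on
    }
}

fn main() {
    assert_eq!(vote_target(9, 4, 1, 4), Some(8)); // vote on min delta
    assert_eq!(vote_target(1008, 1000, 1, 1), Some(1004)); // power-of-two step
    assert_eq!(vote_target(1000, 1000, 1, 1), None); // nothing new to vote on
    assert_eq!(vote_target(1008, 1000, 1001, 8), Some(1001)); // mandatory block
}
```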
+ assert_eq!(rounds.session_start(), 11); + assert_eq!(rounds.validators(), new_validator_set.validators()); + assert_eq!(rounds.validator_set_id(), new_validator_set.id()); + } + + #[test] + fn should_triage_votes_and_process_later() { + let keys = &[Keyring::Alice, Keyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1, 0); + let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + + fn new_vote( + block_number: NumberFor<Block>, + ) -> VoteMessage<NumberFor<Block>, AuthorityId, Signature> { + let commitment = Commitment { + payload: Payload::new(*b"BF", vec![]), + block_number, + validator_set_id: 0, + }; + VoteMessage { + commitment, + id: Keyring::Alice.public(), + signature: Keyring::Alice.sign(b"I am committed"), + } + } + + // best grandpa is 20 + let best_grandpa_header = Header::new( + 20u32.into(), + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); - let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.session_start(), &11); - assert_eq!(worker_rounds.validators(), new_validator_set.validators()); - assert_eq!(worker_rounds.validator_set_id(), new_validator_set.id()); + worker.voting_oracle.add_session(Rounds::new(10, validator_set.clone())); + worker.best_grandpa_block_header = best_grandpa_header; + + // triage votes for blocks 10..13 + worker.triage_incoming_vote(new_vote(10)).unwrap(); + worker.triage_incoming_vote(new_vote(11)).unwrap(); + worker.triage_incoming_vote(new_vote(12)).unwrap(); + // triage votes for blocks 20..23 + worker.triage_incoming_vote(new_vote(20)).unwrap(); + worker.triage_incoming_vote(new_vote(21)).unwrap(); + worker.triage_incoming_vote(new_vote(22)).unwrap(); + + // vote for 10 should have been handled, while the rest buffered for later processing + let mut votes = worker.pending_votes.values(); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 11); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 12); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 20); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); + assert!(votes.next().is_none()); + + // simulate mandatory done, and retry buffered votes + worker.voting_oracle.rounds_mut().unwrap().test_set_mandatory_done(true); + worker.try_pending_justif_and_votes().unwrap(); + // all blocks <= grandpa finalized should have been handled, rest still buffered + let mut votes = worker.pending_votes.values(); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); } } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 6ab559dea46fd..b38dba03d6b7f 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -17,9 +17,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } impl-trait-for-tuples = "0.2.2" memmap2 = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = 
"../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index efb40d46f216a..2cc9f356e4df7 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; -use sc_network::config::MultiaddrWithPeerId; +use sc_network_common::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use serde_json as json; @@ -61,7 +61,16 @@ impl GenesisSource { let file = File::open(path).map_err(|e| { format!("Error opening spec file at `{}`: {}", path.display(), e) })?; - let genesis: GenesisContainer = json::from_reader(file) + // SAFETY: `mmap` is fundamentally unsafe since technically the file can change + // underneath us while it is mapped; in practice it's unlikely to be a + // problem + let bytes = unsafe { + memmap2::Mmap::map(&file).map_err(|e| { + format!("Error mmaping spec file `{}`: {}", path.display(), e) + })? + }; + + let genesis: GenesisContainer = json::from_slice(&bytes) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) }, diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 73d3e1af15492..9d2cc728b8288 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -184,7 +184,7 @@ pub use extension::{ }; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -use sc_network::config::MultiaddrWithPeerId; +use sc_network_common::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 72b4efde077ff..50d110d40eabd 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,15 +18,15 @@ clap = { version = "3.1.18", features = ["derive"] } fdlimit = "0.2.1" futures = "0.3.21" hex = "0.4.2" -libp2p = "0.45.1" +libp2p = "0.46.1" log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" rand = "0.7.3" regex = "1.5.5" -rpassword = "5.0.0" +rpassword = "7.0.0" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" tiny-bip39 = "0.8.2" tokio = { version = "1.17.0", features = ["signal", "rt-multi-thread", "parking_lot"] } diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index 0e57d1677efbb..cbc22cc4d52d9 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -73,11 +73,10 @@ impl ChainInfoCmd { B: BlockT, { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + trie_cache_maximum_size: config.trie_cache_maximum_size, state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks.clone(), + blocks_pruning: config.blocks_pruning, }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index b4a3885e39906..fd236aa0211dd 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -50,15 +50,11 @@ pub struct RunCmd { #[clap(long)] pub 
no_grandpa: bool, - /// Experimental: Run in light client mode. - #[clap(long)] - pub light: bool, - /// Listen to all RPC interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[clap(long)] pub rpc_external: bool, @@ -89,7 +85,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[clap(long)] pub ws_external: bool, @@ -337,7 +333,7 @@ impl CliConfiguration for RunCmd { fn dev_key_seed(&self, is_dev: bool) -> Result> { Ok(self.get_keyring().map(|a| format!("//{}", a)).or_else(|| { - if is_dev && !self.light { + if is_dev { Some("//Alice".into()) } else { None @@ -363,16 +359,9 @@ impl CliConfiguration for RunCmd { fn role(&self, is_dev: bool) -> Result { let keyring = self.get_keyring(); - let is_light = self.light; - let is_authority = (self.validator || is_dev || keyring.is_some()) && !is_light; + let is_authority = self.validator || is_dev || keyring.is_some(); - Ok(if is_light { - sc_service::Role::Light - } else if is_authority { - sc_service::Role::Authority - } else { - sc_service::Role::Full - }) + Ok(if is_authority { sc_service::Role::Authority } else { sc_service::Role::Full }) } fn force_authoring(&self) -> Result { @@ -480,8 +469,8 @@ impl CliConfiguration for RunCmd { Ok(self.ws_max_out_buffer_capacity) } - fn transaction_pool(&self) -> Result { - Ok(self.pool_config.transaction_pool()) + fn transaction_pool(&self, is_dev: bool) -> Result { + Ok(self.pool_config.transaction_pool(is_dev)) } fn max_runtime_instances(&self) -> Result> { @@ -567,7 +556,7 @@ impl std::error::Error for TelemetryParsingError {} impl std::fmt::Display for TelemetryParsingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &*self { + match self { TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), } diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 32556f0ea728d..95849065471b4 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -48,7 +48,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { uri.into() } } else { - rpassword::read_password_from_tty(Some("URI: "))? + rpassword::prompt_password("URI: ")? }; Ok(uri) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 6e1317c11fbc4..92091ea118ff1 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -31,7 +31,7 @@ use sc_service::{ NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }, - ChainSpec, KeepBlocks, TracingReceiver, + BlocksPruning, ChainSpec, TracingReceiver, }; use sc_tracing::logging::LoggerBuilder; use std::{net::SocketAddr, path::PathBuf}; @@ -145,7 +145,7 @@ pub trait CliConfiguration: Sized { /// Get the transaction pool options /// /// By default this is `TransactionPoolOptions::default()`. 
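A side note on the chain-spec change a few hunks above: `json::from_reader(file)` performs many small buffered reads, whereas mapping the file and handing `json::from_slice` the mapped bytes parses in place with no intermediate copy. Reduced to its essentials (a hypothetical helper, assuming `memmap2` and `serde_json` as dependencies):

```rust
use std::fs::File;

fn read_json_value(path: &std::path::Path) -> Result<serde_json::Value, String> {
    let file = File::open(path).map_err(|e| format!("open `{}`: {}", path.display(), e))?;
    // SAFETY: as the diff's comment notes, the mapping is only problematic if
    // the file is mutated while mapped; acceptable for a read at startup.
    let bytes = unsafe {
        memmap2::Mmap::map(&file).map_err(|e| format!("mmap `{}`: {}", path.display(), e))?
    };
    // Parse straight out of the mapping.
    serde_json::from_slice(&bytes).map_err(|e| format!("parse `{}`: {}", path.display(), e))
}

fn main() {
    let arg = std::env::args().nth(1).expect("pass a path to a JSON file");
    println!("{:?}", read_json_value(std::path::Path::new(&arg)).map(|v| v.is_object()));
}
```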
- fn transaction_pool(&self) -> Result { + fn transaction_pool(&self, _is_dev: bool) -> Result { Ok(Default::default()) } @@ -211,12 +211,8 @@ pub trait CliConfiguration: Sized { base_path: &PathBuf, cache_size: usize, database: Database, - role: &Role, ) -> Result { - let role_dir = match role { - Role::Light => "light", - Role::Full | Role::Authority => "full", - }; + let role_dir = "full"; let rocksdb_path = base_path.join("db").join(role_dir); let paritydb_path = base_path.join("paritydb").join(role_dir); Ok(match database { @@ -234,18 +230,12 @@ pub trait CliConfiguration: Sized { }) } - /// Get the state cache size. + /// Get the trie cache maximum size. /// /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. - fn state_cache_size(&self) -> Result { - Ok(self.import_params().map(|x| x.state_cache_size()).unwrap_or_default()) - } - - /// Get the state cache child ratio (if any). - /// - /// By default this is `None`. - fn state_cache_child_ratio(&self) -> Result> { - Ok(Default::default()) + /// If `None` is returned the trie cache is disabled. + fn trie_cache_maximum_size(&self) -> Result> { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) } /// Get the state pruning mode. @@ -261,11 +251,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `KeepBlocks::All`. - fn keep_blocks(&self) -> Result { + /// `BlocksPruning::All`. + fn blocks_pruning(&self) -> Result { self.pruning_params() - .map(|x| x.keep_blocks()) - .unwrap_or_else(|| Ok(KeepBlocks::All)) + .map(|x| x.blocks_pruning()) + .unwrap_or_else(|| Ok(BlocksPruning::All)) } /// Get the chain ID (string). @@ -523,7 +513,7 @@ pub trait CliConfiguration: Sized { impl_name: C::impl_name(), impl_version: C::impl_version(), tokio_handle, - transaction_pool: self.transaction_pool()?, + transaction_pool: self.transaction_pool(is_dev)?, network: self.network_config( &chain_spec, is_dev, @@ -536,11 +526,10 @@ pub trait CliConfiguration: Sized { )?, keystore_remote, keystore, - database: self.database_config(&config_dir, database_cache_size, database, &role)?, - state_cache_size: self.state_cache_size()?, - state_cache_child_ratio: self.state_cache_child_ratio()?, + database: self.database_config(&config_dir, database_cache_size, database)?, + trie_cache_maximum_size: self.trie_cache_maximum_size()?, state_pruning: self.state_pruning()?, - keep_blocks: self.keep_blocks()?, + blocks_pruning: self.blocks_pruning()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index aef7511ffc371..c851050838965 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -95,14 +95,30 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. + /// + /// Providing `0` will disable the cache. #[clap(long, value_name = "Bytes", default_value = "67108864")] - pub state_cache_size: usize, + pub trie_cache_size: usize, + + /// DEPRECATED + /// + /// Switch to `--trie-cache-size`. + #[clap(long)] + state_cache_size: Option, } impl ImportParams { - /// Specify the state cache size. 
- pub fn state_cache_size(&self) -> usize { - self.state_cache_size + /// Specify the trie cache maximum size. + pub fn trie_cache_maximum_size(&self) -> Option { + if self.state_cache_size.is_some() { + eprintln!("`--state-cache-size` was deprecated. Please switch to `--trie-cache-size`."); + } + + if self.trie_cache_size == 0 { + None + } else { + Some(self.trie_cache_size) + } } /// Get the WASM execution method from the parameters diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 46403f95fbc4b..386d1791dd805 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -94,7 +94,7 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let password = rpassword::read_password_from_tty(Some("Key password: "))?; + let password = rpassword::prompt_password("Key password: ")?; Some(SecretString::new(password)) } else { password @@ -105,6 +105,5 @@ impl KeystoreParams { } fn input_keystore_password() -> Result { - rpassword::read_password_from_tty(Some("Keystore password: ")) - .map_err(|e| format!("{:?}", e).into()) + rpassword::prompt_password("Keystore password: ").map_err(|e| format!("{:?}", e).into()) } diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 9d471224cc59e..34a0982e63d95 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -18,7 +18,7 @@ use crate::error; use clap::Args; -use sc_service::{KeepBlocks, PruningMode}; +use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode #[derive(Debug, Clone, PartialEq, Args)] @@ -28,37 +28,37 @@ pub struct PruningParams { /// Default is to keep only the last 256 blocks, /// otherwise, the state can be kept for all of the blocks (i.e 'archive'), /// or for all of the canonical blocks (i.e 'archive-canonical'). - #[clap(long, value_name = "PRUNING_MODE")] - pub pruning: Option, + #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] + pub state_pruning: Option, /// Specify the number of finalized blocks to keep in the database. /// /// Default is to keep all blocks. /// /// NOTE: only finalized blocks are subject for removal! 
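The flag migrations in this area (`--state-cache-size` to `--trie-cache-size` above, `--pruning` to `--state-pruning` and `--keep-blocks` to `--blocks-pruning` just below) combine two back-compat techniques: a clap `alias` so the old spelling keeps parsing into the renamed field, and a retained-but-deprecated option that merely warns. A minimal stand-alone reproduction (clap 3 with the `derive` feature; field names shortened):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Opts {
    /// New name; the old `--pruning` spelling still parses via the alias.
    #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")]
    state_pruning: Option<String>,

    /// DEPRECATED: kept so old command lines don't error; use `--trie-cache-size`.
    #[clap(long)]
    state_cache_size: Option<usize>,

    #[clap(long, default_value = "67108864")]
    trie_cache_size: usize,
}

fn main() {
    let opts = Opts::parse();
    if opts.state_cache_size.is_some() {
        eprintln!("`--state-cache-size` is deprecated, please use `--trie-cache-size`.");
    }
    println!("{:?}", opts);
}
```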
- #[clap(long, value_name = "COUNT")] - pub keep_blocks: Option, + #[clap(alias = "keep-blocks", long, value_name = "COUNT")] + pub blocks_pruning: Option, } impl PruningParams { /// Get the pruning value from the parameters pub fn state_pruning(&self) -> error::Result> { - self.pruning + self.state_pruning .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), bc => bc .parse() .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) - .map(PruningMode::keep_blocks), + .map(PruningMode::blocks_pruning), }) .transpose() } /// Get the block pruning value from the parameters - pub fn keep_blocks(&self) -> error::Result { - Ok(match self.keep_blocks { - Some(n) => KeepBlocks::Some(n), - None => KeepBlocks::All, + pub fn blocks_pruning(&self) -> error::Result { + Ok(match self.blocks_pruning { + Some(n) => BlocksPruning::Some(n), + None => BlocksPruning::All, }) } } diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index efb78430ced55..6429dfec3f908 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -29,11 +29,15 @@ pub struct TransactionPoolParams { /// Maximum number of kilobytes of all transactions stored in the pool. #[clap(long, value_name = "COUNT", default_value = "20480")] pub pool_kbytes: usize, + + /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. + #[clap(long, value_name = "SECONDS")] + pub tx_ban_seconds: Option, } impl TransactionPoolParams { /// Fill the given `PoolConfiguration` by looking at the cli parameters. - pub fn transaction_pool(&self) -> TransactionPoolOptions { + pub fn transaction_pool(&self, is_dev: bool) -> TransactionPoolOptions { let mut opts = TransactionPoolOptions::default(); // ready queue @@ -45,6 +49,14 @@ impl TransactionPoolParams { opts.future.count = self.pool_limit / factor; opts.future.total_bytes = self.pool_kbytes * 1024 / factor; + opts.ban_time = if let Some(ban_seconds) = self.tx_ban_seconds { + std::time::Duration::from_secs(ban_seconds) + } else if is_dev { + std::time::Duration::from_secs(0) + } else { + std::time::Duration::from_secs(30 * 60) + }; + opts } } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 69499fa346e31..3fe9891e9a7ba 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" @@ -37,7 +37,7 @@ sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" tempfile = "3.1.0" sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network = { version = "0.10.0-dev", path = "../../network" } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index ac3b89f2ff9a2..92fe1fa3cf29d 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -566,7 +566,6 @@ mod tests { use sc_consensus::BoxJustificationImport; use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; - use sc_network::config::ProtocolConfig; use sc_network_test::{Block as 
TestBlock, *}; use sp_application_crypto::key_types::AURA; use sp_consensus::{ @@ -645,6 +644,7 @@ mod tests { >; type AuraPeer = Peer<(), PeersClient>; + #[derive(Default)] pub struct AuraTestNet { peers: Vec, } @@ -654,17 +654,7 @@ mod tests { type PeerData = (); type BlockImport = PeersClient; - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - _peer_data: &(), - ) -> Self::Verifier { + fn make_verifier(&self, client: PeersClient, _peer_data: &()) -> Self::Verifier { let client = client.as_client(); let slot_duration = slot_duration(&*client).expect("slot duration available"); @@ -831,7 +821,7 @@ mod tests { block_import: client, env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle.clone(), + sync_oracle: DummyOracle, justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), @@ -883,7 +873,7 @@ mod tests { block_import: client.clone(), env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle.clone(), + sync_oracle: DummyOracle, justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Option::<()>::None, diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 765fc367f424f..e559ec165a793 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } @@ -24,9 +24,8 @@ merlin = "2.0" num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" -retain_mut = "0.1.4" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 4c9350735d529..8433e3ac92e57 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } futures = "0.3.21" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" @@ -30,7 +30,7 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" tempfile = "3.1.0" tokio = "1.17.0" sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index af19d410346e3..b000d38a44f02 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -262,7 +262,7 @@ mod tests { let (response, _) = api.raw_json_request(request).await.unwrap(); let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; - assert_eq!(&response, expected); + assert_eq!(&response.result, expected); } #[tokio::test] @@ 
-274,6 +274,6 @@ mod tests { let (response, _) = api.raw_json_request(request).await.unwrap(); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"RPC call is unsafe to be called externally"},"id":1}"#; - assert_eq!(&response, expected); + assert_eq!(&response.result, expected); } } diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 43df26a9a29ae..896bfaeda1dc9 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -310,7 +310,7 @@ mod tests { assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); - epoch.authorities.push((valid_public_key.clone().into(), 10)); + epoch.authorities.push((valid_public_key.into(), 10)); assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 8478122375319..f61ba23d920f3 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -87,7 +87,6 @@ use futures::{ use log::{debug, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use retain_mut::RetainMut; use schnorrkel::SignatureError; use sc_client_api::{ @@ -835,17 +834,16 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "babe", "Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } + let sinks = &mut self.slot_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false + }, }); } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index e0590fc0cd86e..5ecdb42f7f177 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -29,7 +29,6 @@ use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_keystore::LocalKeystore; -use sc_network::config::ProtocolConfig; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; @@ -153,7 +152,7 @@ impl DummyProposer { // that will re-check the randomness logic off-chain. let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: epoch.authorities.clone(), - randomness: epoch.randomness.clone(), + randomness: epoch.randomness, }) .encode(); let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); @@ -220,6 +219,7 @@ where type BabePeer = Peer, BabeBlockImport>; +#[derive(Default)] pub struct BabeTestNet { peers: Vec, } @@ -278,12 +278,6 @@ impl TestNetFactory for BabeTestNet { type PeerData = Option; type BlockImport = BabeBlockImport; - /// Create new test network with peers and given config. 
- fn from_config(_config: &ProtocolConfig) -> Self { - debug!(target: "babe", "Creating test network from config"); - BabeTestNet { peers: Vec::new() } - } - fn make_block_import( &self, client: PeersClient, @@ -309,12 +303,7 @@ impl TestNetFactory for BabeTestNet { ) } - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - maybe_link: &Option, - ) -> Self::Verifier { + fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { use substrate_test_runtime_client::DefaultTestClientBuilderExt; let client = client.as_client(); diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 6d76eba0935ff..180ad7c38008d 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.42" +async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index f81c8eb7e8dee..10739f63ef779 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -433,10 +433,10 @@ impl JustificationSyncLink for () { impl> JustificationSyncLink for Arc { fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - L::request_justification(&*self, hash, number); + L::request_justification(self, hash, number); } fn clear_justification_requests(&self) { - L::clear_justification_requests(&*self); + L::clear_justification_requests(self); } } diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index a7b456191b000..c71e21ccd4b00 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -160,6 +160,16 @@ pub enum BlockImportStatus { ImportedUnknown(N, ImportedAux, Option), } +impl BlockImportStatus { + /// Returns the imported block number. + pub fn number(&self) -> &N { + match self { + BlockImportStatus::ImportedKnown(n, _) | + BlockImportStatus::ImportedUnknown(n, _, _) => n, + } + } +} + /// Block import error. 
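The `BlockImportStatus::number` accessor added just above leans on an or-pattern that binds the same variable in every variant; the same shape in miniature:

```rust
enum Status<N> {
    Known(N),
    Unknown(N, &'static str),
}

impl<N> Status<N> {
    // Every alternative of the or-pattern must bind `n` with the same type.
    fn number(&self) -> &N {
        match self {
            Status::Known(n) | Status::Unknown(n, _) => n,
        }
    }
}

fn main() {
    assert_eq!(*Status::Known(3u32).number(), 3);
    assert_eq!(*Status::Unknown(7u32, "aux").number(), 7);
}
```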
#[derive(Debug, thiserror::Error)] pub enum BlockImportError { diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 9fe293142050b..84ccba990e599 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -535,7 +535,7 @@ mod tests { _number: BlockNumber, _success: bool, ) { - self.events.push(Event::JustificationImported(hash.clone())) + self.events.push(Event::JustificationImported(*hash)) } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index fee69613debf0..2e0186495db5e 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -1063,7 +1063,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1080,7 +1080,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1145,7 +1145,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1162,7 +1162,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1220,7 +1220,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"1", 1, *b"0", incremented_epoch) @@ -1330,7 +1330,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1347,7 +1347,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&epoch_b_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"B", 201, *b"A", incremented_epoch) @@ -1364,7 +1364,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"C", 1, *b"0", incremented_epoch) diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 7aa2232178f22..9c3bc5413317d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } assert_matches = "1.3.0" -async-trait = 
"0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index dfd3730fd3427..b5dfc3d809c13 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -33,6 +33,9 @@ pub trait ConsensusDataProvider: Send + Sync { /// Block import transaction type type Transaction; + /// The proof type. + type Proof; + /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; @@ -42,5 +45,6 @@ pub trait ConsensusDataProvider: Send + Sync { parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, + proof: Self::Proof, ) -> Result<(), Error>; } diff --git a/client/consensus/manual-seal/src/consensus/aura.rs b/client/consensus/manual-seal/src/consensus/aura.rs index 7b5d6720562be..065b78732cdc3 100644 --- a/client/consensus/manual-seal/src/consensus/aura.rs +++ b/client/consensus/manual-seal/src/consensus/aura.rs @@ -35,14 +35,14 @@ use sp_timestamp::TimestampInherentData; use std::{marker::PhantomData, sync::Arc}; /// Consensus data provider for Aura. -pub struct AuraConsensusDataProvider { +pub struct AuraConsensusDataProvider { // slot duration slot_duration: SlotDuration, // phantom data for required generics - _phantom: PhantomData<(B, C)>, + _phantom: PhantomData<(B, C, P)>, } -impl AuraConsensusDataProvider +impl AuraConsensusDataProvider where B: BlockT, C: AuxStore + ProvideRuntimeApi + UsageProvider, @@ -58,7 +58,7 @@ where } } -impl ConsensusDataProvider for AuraConsensusDataProvider +impl ConsensusDataProvider for AuraConsensusDataProvider where B: BlockT, C: AuxStore @@ -67,8 +67,10 @@ where + UsageProvider + ProvideRuntimeApi, C::Api: AuraApi, + P: Send + Sync, { type Transaction = TransactionFor; + type Proof = P; fn create_digest( &self, @@ -92,6 +94,7 @@ where _parent: &B::Header, _params: &mut BlockImportParams, _inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { Ok(()) } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 3e7770cd982d2..cc73a3fa961ce 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -31,7 +31,7 @@ use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; use sp_keystore::SyncCryptoStorePtr; -use std::{borrow::Cow, sync::Arc}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; @@ -53,7 +53,7 @@ use sp_timestamp::TimestampInherentData; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. -pub struct BabeConsensusDataProvider { +pub struct BabeConsensusDataProvider { /// shared reference to keystore keystore: SyncCryptoStorePtr, @@ -68,6 +68,7 @@ pub struct BabeConsensusDataProvider { /// Authorities to be used for this babe chain. authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + _phantom: PhantomData
<P>
, } /// Verifier to be used for babe chains @@ -131,7 +132,7 @@ where } } -impl BabeConsensusDataProvider +impl BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -153,7 +154,14 @@ where let config = Config::get(&*client)?; - Ok(Self { config, client, keystore, epoch_changes, authorities }) + Ok(Self { + config, + client, + keystore, + epoch_changes, + authorities, + _phantom: Default::default(), + }) } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { @@ -181,7 +189,7 @@ where } } -impl ConsensusDataProvider for BabeConsensusDataProvider +impl ConsensusDataProvider for BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -190,8 +198,10 @@ where + UsageProvider + ProvideRuntimeApi, C::Api: BabeApi, + P: Send + Sync, { type Transaction = TransactionFor; + type Proof = P; fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents @@ -259,6 +269,7 @@ where parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { let slot = inherents .babe_inherent_data()? diff --git a/client/consensus/manual-seal/src/consensus/timestamp.rs b/client/consensus/manual-seal/src/consensus/timestamp.rs index e7f4e709ab996..70b5e5de4ec6c 100644 --- a/client/consensus/manual-seal/src/consensus/timestamp.rs +++ b/client/consensus/manual-seal/src/consensus/timestamp.rs @@ -46,10 +46,10 @@ use std::{ /// This works by either fetching the `slot_number` from the most recent header and dividing /// that value by `slot_duration` in order to fork chains that expect this inherent. /// -/// It produces timestamp inherents that are increaed by `slot_duraation` whenever +/// It produces timestamp inherents that are increased by `slot_duration` whenever /// `provide_inherent_data` is called. pub struct SlotTimestampProvider { - // holds the unix millisecnd timestamp for the most recent block + // holds the unix millisecond timestamp for the most recent block unix_millis: atomic::AtomicU64, // configured slot_duration in the runtime slot_duration: SlotDuration, diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index a8d2634ade560..c5dd169e281f2 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -81,7 +81,7 @@ where } /// Params required to start the instant sealing authorship task. -pub struct ManualSealParams, TP, SC, CS, CIDP> { +pub struct ManualSealParams, TP, SC, CS, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -103,14 +103,14 @@ pub struct ManualSealParams, TP, SC, C /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option>>>, + Option>>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, } /// Params required to start the manual sealing authorship task. -pub struct InstantSealParams, TP, SC, CIDP> { +pub struct InstantSealParams, TP, SC, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -128,14 +128,14 @@ pub struct InstantSealParams, TP, SC, /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option>>>, + Option>>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, } /// Creates the background authorship task for the manual seal engine. 
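The manual-seal hunks above and below thread a new proof value from the proposer through to `ConsensusDataProvider::append_block_import`. A reduced sketch of the shape of that change (simplified names and signatures, not the real trait):

```rust
// The provider declares what kind of proof it consumes.
trait DataProvider {
    type Proof;
    fn append_block_import(&self, proof: Self::Proof);
}

// Providers that don't care pick `()`, as the updated tests do.
struct NoopProvider;

impl DataProvider for NoopProvider {
    type Proof = ();
    fn append_block_import(&self, _proof: Self::Proof) {}
}

// The sealing path forwards the proposal's proof instead of dropping it.
fn seal<P, D: DataProvider<Proof = P>>(provider: &D, proof: P) {
    provider.append_block_import(proof);
}

fn main() {
    seal(&NoopProvider, ());
}
```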
-pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -145,7 +145,7 @@ pub async fn run_manual_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: ManualSealParams, + }: ManualSealParams, ) where B: BlockT + 'static, BI: BlockImport> @@ -155,12 +155,13 @@ pub async fn run_manual_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer>, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -198,7 +199,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. -pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -207,7 +208,7 @@ pub async fn run_instant_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport> @@ -217,11 +218,12 @@ pub async fn run_instant_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer>, SC: SelectChain + 'static, TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -275,6 +277,7 @@ mod tests { C: ProvideRuntimeApi + Send + Sync, { type Transaction = TransactionFor; + type Proof = (); fn create_digest( &self, @@ -289,6 +292,7 @@ mod tests { _parent: &B::Header, params: &mut BlockImportParams, _inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { params.post_digests.push(DigestItem::Other(vec![1])); Ok(()) @@ -351,7 +355,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -418,7 +422,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -498,7 +502,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 202b54fe5d0c5..32e3acf68506e 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -36,7 +36,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP, P> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. 
pub create_empty: bool, @@ -56,7 +56,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option<&'a dyn ConsensusDataProvider>>, + Option<&'a dyn ConsensusDataProvider>>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. @@ -64,7 +64,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -77,7 +77,7 @@ pub async fn seal_block( create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, ) where B: BlockT, BI: BlockImport> @@ -86,11 +86,12 @@ pub async fn seal_block( + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - E::Proposer: Proposer>, + E::Proposer: Proposer>, TP: TransactionPool, SC: SelectChain, TransactionFor: 'static, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { let future = async { if pool.status().ready == 0 && !create_empty { @@ -138,6 +139,7 @@ pub async fn seal_block( } let (header, body) = proposal.block.deconstruct(); + let proof = proposal.proof; let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); params.body = Some(body); params.finalized = finalize; @@ -147,7 +149,7 @@ pub async fn seal_block( )); if let Some(digest_provider) = digest_provider { - digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; + digest_provider.append_block_import(&parent, &mut params, &inherent_data, proof)?; } // Make sure we return the same post-hash that will be calculated when importing the block diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 7e9b43fac8a57..4833786d2b990 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 6f9ee6f864ad8..f63e453a48026 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -518,7 +518,7 @@ pub fn start_mining_worker( select_chain: S, algorithm: Algorithm, mut env: E, - mut sync_oracle: SO, + sync_oracle: SO, justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 41a6c1ad5e641..208e31971257d 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.1" diff --git 
a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index eeaec68d369d2..c1d01500ffe47 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -89,8 +89,8 @@ where // 1) signed by the same voter, if prev_signer == signer { // 2) with different hash - if header.hash() != prev_header.hash() { - return Ok(Some(EquivocationProof { + return if header.hash() != prev_header.hash() { + Ok(Some(EquivocationProof { slot, offender: signer.clone(), first_header: prev_header.clone(), @@ -100,7 +100,7 @@ where // We don't need to continue in case of duplicated header, // since it's already saved and a possible equivocation // would have been detected before. - return Ok(None) + Ok(None) } } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index a6fbc4bebc796..39b40a32f18ca 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -38,7 +38,7 @@ use log::{debug, info, warn}; use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{CanAuthorWith, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{CanAuthorWith, Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ @@ -103,7 +103,7 @@ pub trait SimpleSlotWorker { type Proposer: Proposer + Send; /// Data associated with a slot claim. - type Claim: Send + 'static; + type Claim: Send + Sync + 'static; /// Epoch data necessary for authoring. type EpochData: Send + Sync + 'static; @@ -183,6 +183,70 @@ pub trait SimpleSlotWorker { /// Remaining duration for proposing. fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration; + /// Propose a block by `Proposer`. + async fn propose( + &mut self, + proposer: Self::Proposer, + claim: &Self::Claim, + slot_info: SlotInfo, + proposing_remaining: Delay, + ) -> Option< + Proposal< + B, + >::Transaction, + >::Proof, + >, + > { + let slot = slot_info.slot; + let telemetry = self.telemetry(); + let logging_target = self.logging_target(); + let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info); + let logs = self.pre_digest_data(slot, claim); + + // deadline our production to 98% of the total time left for proposing. As we deadline + // the proposing below to the same total time left, the 2% margin should be enough for + // the result to be returned. + let proposing = proposer + .propose( + slot_info.inherent_data, + sp_runtime::generic::Digest { logs }, + proposing_remaining_duration.mul_f32(0.98), + None, + ) + .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); + + let proposal = match futures::future::select(proposing, proposing_remaining).await { + Either::Left((Ok(p), _)) => p, + Either::Left((Err(err), _)) => { + warn!(target: logging_target, "Proposing failed: {}", err); + + return None + }, + Either::Right(_) => { + info!( + target: logging_target, + "⌛️ Discarding proposal for slot {}; block production took too long", slot, + ); + // If the node was compiled with debug, tell the user to use release optimizations. 
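The proposing logic being moved in this hunk keeps its deadline arithmetic: the proposer is handed 98% of the remaining slot time while the surrounding race runs on the full remainder, leaving a 2% margin for the result to come back. A self-contained sketch of that race (the inner delay stands in for `proposer.propose`; assumes the `futures` and `futures-timer` crates):

```rust
use futures::future::{self, Either};
use futures_timer::Delay;
use std::time::Duration;

async fn propose_with_deadline(remaining: Duration) -> Option<&'static str> {
    let deadline = Delay::new(remaining);
    let proposing = Box::pin(async move {
        // Stand-in for `proposer.propose(...)`, itself bounded to 98%.
        Delay::new(remaining.mul_f32(0.98)).await;
        "block"
    });
    match future::select(proposing, deadline).await {
        Either::Left((block, _)) => Some(block),
        // Took too long: discard the proposal, as the worker logs above.
        Either::Right(_) => None,
    }
}

fn main() {
    let got = futures::executor::block_on(propose_with_deadline(Duration::from_millis(50)));
    assert_eq!(got, Some("block"));
}
```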
+ #[cfg(build_type = "debug")] + info!( + target: logging_target, + "👉 Recompile your node in `--release` mode to mitigate this problem.", + ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.discarding_proposal_took_too_long"; + "slot" => *slot, + ); + + return None + }, + }; + + Some(proposal) + } + /// Implements [`SlotWorker::on_slot`]. async fn on_slot( &mut self, @@ -256,10 +320,8 @@ pub trait SimpleSlotWorker { } debug!( - target: self.logging_target(), - "Starting authorship at slot {}; timestamp = {}", - slot, - *timestamp, + target: logging_target, + "Starting authorship at slot {}; timestamp = {}", slot, *timestamp, ); telemetry!( @@ -287,48 +349,7 @@ pub trait SimpleSlotWorker { }, }; - let logs = self.pre_digest_data(slot, &claim); - - // deadline our production to 98% of the total time left for proposing. As we deadline - // the proposing below to the same total time left, the 2% margin should be enough for - // the result to be returned. - let proposing = proposer - .propose( - slot_info.inherent_data, - sp_runtime::generic::Digest { logs }, - proposing_remaining_duration.mul_f32(0.98), - None, - ) - .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); - - let proposal = match futures::future::select(proposing, proposing_remaining).await { - Either::Left((Ok(p), _)) => p, - Either::Left((Err(err), _)) => { - warn!(target: logging_target, "Proposing failed: {}", err); - - return None - }, - Either::Right(_) => { - info!( - target: logging_target, - "⌛️ Discarding proposal for slot {}; block production took too long", slot, - ); - // If the node was compiled with debug, tell the user to use release optimizations. - #[cfg(build_type = "debug")] - info!( - target: logging_target, - "👉 Recompile your node in `--release` mode to mitigate this problem.", - ); - telemetry!( - telemetry; - CONSENSUS_INFO; - "slots.discarding_proposal_took_too_long"; - "slot" => *slot, - ); - - return None - }, - }; + let proposal = self.propose(proposer, &claim, slot_info, proposing_remaining).await?; let (block, storage_proof) = (proposal.block, proposal.proof); let (header, body) = block.deconstruct(); @@ -469,7 +490,7 @@ pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, - mut sync_oracle: SO, + sync_oracle: SO, create_inherent_data_providers: CIDP, can_author_with: CAW, ) where diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 3b6402b3f6023..7f564ae642433 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -22,8 +22,8 @@ kvdb-memorydb = "0.11.0" kvdb-rocksdb = { version = "0.15.2", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.3.13" -parking_lot = "0.12.0" +parity-db = "0.3.16" +parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-arithmetic = { version = "5.0.0", path = "../../primitives/arithmetic" } @@ -35,9 +35,12 @@ sp-state-machine = { version = "0.12.0", path = "../../primitives/state-machine" sp-trie = { version = "6.0.0", path = "../../primitives/trie" } [dev-dependencies] +criterion = "0.3.3" kvdb-rocksdb = "0.15.1" +rand = "0.8.4" +tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } -tempfile = "3" +kitchensink-runtime = { path = "../../bin/node/runtime" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } @@ -46,3 +49,10 @@ default = [] 
test-helpers = [] runtime-benchmarks = [] rocksdb = ["kvdb-rocksdb"] + +[[bench]] +name = "state_access" +harness = false + +[lib] +bench = false diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs new file mode 100644 index 0000000000000..78aed7858e342 --- /dev/null +++ b/client/db/benches/state_access.rs @@ -0,0 +1,312 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use rand::{distributions::Uniform, rngs::StdRng, Rng, SeedableRng}; +use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBackend}; +use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; +use sp_core::H256; +use sp_runtime::{ + generic::BlockId, + testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + StateVersion, Storage, +}; +use tempfile::TempDir; + +pub(crate) type Block = RawBlock>; + +fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { + let mut op = db.begin_operation().unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + header.state_root = op + .set_genesis_state( + Storage { + top: vec![( + sp_core::storage::well_known_keys::CODE.to_vec(), + kitchensink_runtime::wasm_binary_unwrap().to_vec(), + )] + .into_iter() + .collect(), + children_default: Default::default(), + }, + true, + StateVersion::V1, + ) + .unwrap(); + + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let mut number = 1; + let mut parent_hash = header.hash(); + + for i in 0..10 { + let mut op = db.begin_operation().unwrap(); + + db.begin_state_operation(&mut op, BlockId::Hash(parent_hash)).unwrap(); + + let mut header = Header { + number, + parent_hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let changes = storage + .iter() + .skip(i * 100_000) + .take(100_000) + .map(|(k, v)| (k.clone(), Some(v.clone()))) + .collect::>(); + + let (state_root, tx) = db.state_at(BlockId::Number(number - 1)).unwrap().storage_root( + changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), + StateVersion::V1, + ); + header.state_root = state_root; + + op.update_db_storage(tx).unwrap(); + op.update_storage(changes.clone(), Default::default()).unwrap(); + + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + number += 1; + parent_hash = header.hash(); + } + + parent_hash +} + +enum BenchmarkConfig { + NoCache, + TrieNodeCache, +} + +fn create_backend(config: BenchmarkConfig, 
temp_dir: &TempDir) -> Backend { + let path = temp_dir.path().to_owned(); + + let trie_cache_maximum_size = match config { + BenchmarkConfig::NoCache => None, + BenchmarkConfig::TrieNodeCache => Some(2 * 1024 * 1024 * 1024), + }; + + let settings = DatabaseSettings { + trie_cache_maximum_size, + state_pruning: Some(PruningMode::ArchiveAll), + source: DatabaseSource::ParityDb { path }, + blocks_pruning: BlocksPruning::All, + }; + + Backend::new(settings, 100).expect("Creates backend") +} + +/// Generate the storage that will be used for the benchmark +/// +/// Returns the `Vec` and the `Vec<(key, value)>` +fn generate_storage() -> (Vec>, Vec<(Vec, Vec)>) { + let mut rng = StdRng::seed_from_u64(353893213); + + let mut storage = Vec::new(); + let mut keys = Vec::new(); + + for _ in 0..1_000_000 { + let key_len: usize = rng.gen_range(32..128); + let key = (&mut rng) + .sample_iter(Uniform::new_inclusive(0, 255)) + .take(key_len) + .collect::>(); + + let value_len: usize = rng.gen_range(20..60); + let value = (&mut rng) + .sample_iter(Uniform::new_inclusive(0, 255)) + .take(value_len) + .collect::>(); + + keys.push(key.clone()); + storage.push((key, value)); + } + + (keys, storage) +} + +fn state_access_benchmarks(c: &mut Criterion) { + sp_tracing::try_init_simple(); + + let (keys, storage) = generate_storage(); + let path = TempDir::new().expect("Creates temporary directory"); + + let block_hash = { + let backend = create_backend(BenchmarkConfig::NoCache, &path); + insert_blocks(&backend, storage.clone()) + }; + + let mut group = c.benchmark_group("Reading entire state"); + group.sample_size(20); + + let mut bench_multiple_values = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().cycle().take(keys.len() * multiplier) { + let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_multiple_values( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading each key once", + 1, + ); + bench_multiple_values(BenchmarkConfig::NoCache, "no cache and reading each key once", 1); + + bench_multiple_values( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading 4 times each key in a row", + 4, + ); + bench_multiple_values( + BenchmarkConfig::NoCache, + "no cache and reading 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Reading a single value"); + + let mut bench_single_value = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().take(1).cycle().take(multiplier) { + let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading the key once", + 1, + ); + bench_single_value(BenchmarkConfig::NoCache, "no cache and reading the key once", 1); + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading 4 times each key in a row", + 4, + ); + bench_single_value( + BenchmarkConfig::NoCache, + "no cache and reading 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Hashing a value"); + + 
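+	// Same pattern as the single-value read benchmarks above, but timing `storage_hash`,
+	// i.e. only the hash of the value is fetched instead of the value itself.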
let mut bench_single_value = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().take(1).cycle().take(multiplier) { + let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and hashing the key once", + 1, + ); + bench_single_value(BenchmarkConfig::NoCache, "no cache and hashing the key once", 1); + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and hashing 4 times each key in a row", + 4, + ); + bench_single_value( + BenchmarkConfig::NoCache, + "no cache and hashing 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Hashing `:code`"); + + let mut bench_single_value = |config, desc| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + let _ = state + .storage_hash(sp_core::storage::well_known_keys::CODE) + .expect("Doesn't fail") + .unwrap(); + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_single_value(BenchmarkConfig::TrieNodeCache, "with trie node cache"); + bench_single_value(BenchmarkConfig::NoCache, "no cache"); + + group.finish(); +} + +criterion_group!(benches, state_access_benchmarks); +criterion_main!(benches); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index d3d43e742d026..b1f4e3b6c0af5 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -18,13 +18,7 @@ //! State backend that's useful for benchmarking -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - sync::Arc, -}; - -use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use crate::{DbState, DbStateBuilder}; use hash_db::{Hasher, Prefix}; use kvdb::{DBTransaction, KeyValueDB}; use linked_hash_map::LinkedHashMap; @@ -37,40 +31,31 @@ use sp_runtime::{ StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, - StorageCollection, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, StorageCollection, +}; +use sp_trie::{ + cache::{CacheSize, SharedTrieCache}, + prefixed_key, MemoryDB, +}; +use std::{ + cell::{Cell, RefCell}, + collections::HashMap, + sync::Arc, }; -use sp_trie::{prefixed_key, MemoryDB}; - -type DbState = - sp_state_machine::TrieBackend>>, HashFor>; -type State = CachingState, B>; +type State = DbState; struct StorageDb { db: Arc, - proof_recorder: Option>, _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); - if let Some(recorder) = &self.proof_recorder { - if let Some(v) = recorder.get(key) { - return Ok(v) - } - let backend_value = self - .db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e))?; - recorder.record(*key, backend_value.clone()); - Ok(backend_value) - } else { - self.db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } + self.db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -82,7 +67,6 @@ pub struct BenchmarkingState { db: Cell>>, genesis: 
HashMap, (Vec, i32)>, record: Cell>>, - shared_cache: SharedCache, // shared cache is always empty /// Key tracker for keys in the main trie. /// We track the total number of reads and writes to these keys, /// not de-duplicated for repeats. @@ -93,9 +77,10 @@ pub struct BenchmarkingState { /// not de-duplicated for repeats. child_key_tracker: RefCell, LinkedHashMap, TrackedStorageKey>>>, whitelist: RefCell>, - proof_recorder: Option>, + proof_recorder: Option>>, proof_recorder_root: Cell, enable_tracking: bool, + shared_trie_cache: SharedTrieCache>, } impl BenchmarkingState { @@ -109,7 +94,7 @@ impl BenchmarkingState { let state_version = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); let mut state = BenchmarkingState { state: RefCell::new(None), @@ -118,13 +103,14 @@ impl BenchmarkingState { genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), - shared_cache: new_shared_cache(0, (1, 10)), main_key_tracker: Default::default(), child_key_tracker: Default::default(), whitelist: Default::default(), proof_recorder: record_proof.then(Default::default), proof_recorder_root: Cell::new(root), enable_tracking, + // Enable the cache, but do not sync anything to the shared state. + shared_trie_cache: SharedTrieCache::new(CacheSize::Maximum(0)), }; state.add_whitelist_to_tracker(); @@ -160,16 +146,13 @@ impl BenchmarkingState { recorder.reset(); self.proof_recorder_root.set(self.root.get()); } - let storage_db = Arc::new(StorageDb:: { - db, - proof_recorder: self.proof_recorder.clone(), - _block: Default::default(), - }); - *self.state.borrow_mut() = Some(State::new( - DbState::::new(storage_db, self.root.get()), - self.shared_cache.clone(), - None, - )); + let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); + *self.state.borrow_mut() = Some( + DbStateBuilder::::new(storage_db, self.root.get()) + .with_optional_recorder(self.proof_recorder.clone()) + .with_cache(self.shared_trie_cache.local_cache()) + .build(), + ); Ok(()) } @@ -324,6 +307,19 @@ impl StateBackend> for BenchmarkingState { .child_storage(child_info, key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.add_read_key(Some(child_info.storage_key()), key); + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? 
+ .child_storage_hash(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) @@ -604,22 +600,25 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; + let proof = recorder.to_storage_proof(); + let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie proof_size - } else if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) - { - size as u32 } else { - panic!( - "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", - self.proof_recorder_root.get(), - self.root.get(), - self.genesis_root, - proof_size, - ); + if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { + size as u32 + } else { + panic!( + "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", + self.proof_recorder_root.get(), + self.root.get(), + self.genesis_root, + proof_size, + ); + } } }) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index f1adbd3df1a0f..465db08fe3afc 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -34,8 +34,8 @@ pub mod bench; mod children; mod parity_db; +mod record_stats_state; mod stats; -mod storage_cache; #[cfg(any(feature = "rocksdb", test))] mod upgrade; mod utils; @@ -51,15 +51,15 @@ use std::{ }; use crate::{ + record_stats_state::RecordStatsState, stats::StateUsageStats, - storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, }; use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, - leaves::{FinalizationDisplaced, LeafSet}, + leaves::{FinalizationOutcome, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; @@ -83,10 +83,11 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, - OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, + backend::{AsTrieBackend, Backend as StateBackend}, + ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, + StorageCollection, UsageInfo as StateUsageInfo, }; -use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; +use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -96,13 +97,16 @@ pub use bench::BenchmarkingState; const CACHE_HEADERS: usize = 8; -/// Default value for storage cache child ratio. -const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); - /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend>>, HashFor>; +/// Builder for [`DbState`]. +pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder< + Arc>>, + HashFor, +>; + /// Length of a [`DbHash`]. 
const DB_HASH_LEN: usize = 32; @@ -174,6 +178,14 @@ impl StateBackend> for RefTrackingState { self.state.child_storage(child_info, key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.state.child_storage_hash(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } @@ -272,12 +284,6 @@ impl StateBackend> for RefTrackingState { self.state.child_keys(child_info, prefix) } - fn as_trie_backend( - &self, - ) -> Option<&sp_state_machine::TrieBackend>> { - self.state.as_trie_backend() - } - fn register_overlay_stats(&self, stats: &StateMachineStats) { self.state.register_overlay_stats(stats); } @@ -287,12 +293,22 @@ impl StateBackend> for RefTrackingState { } } +impl AsTrieBackend> for RefTrackingState { + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + + fn as_trie_backend( + &self, + ) -> &sp_state_machine::TrieBackend> { + &self.state.as_trie_backend() + } +} + /// Database settings. pub struct DatabaseSettings { - /// State cache size. - pub state_cache_size: usize, - /// Ratio of cache size dedicated to child tries. - pub state_cache_child_ratio: Option<(usize, usize)>, + /// The maximum trie cache size in bytes. + /// + /// If `None` is given, the cache is disabled. + pub trie_cache_maximum_size: Option, /// Requested state pruning mode. pub state_pruning: Option, /// Where to find the database. @@ -300,12 +316,12 @@ pub struct DatabaseSettings { /// Block pruning mode. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, } /// Block pruning settings. #[derive(Debug, Clone, Copy)] -pub enum KeepBlocks { +pub enum BlocksPruning { /// Keep full block history. All, /// Keep N recent finalized blocks. @@ -730,7 +746,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { - old_state: SyncingCachingState, Block>, + old_state: RecordStatsState, Block>, db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, @@ -800,7 +816,7 @@ impl BlockImportOperation { impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { - type State = SyncingCachingState, Block>; + type State = RecordStatsState, Block>; fn state(&self) -> ClientResult> { Ok(Some(&self.old_state)) @@ -949,7 +965,7 @@ impl EmptyStorage { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); // both triedbmut are the same on empty storage. - sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); EmptyStorage(root) } } @@ -1009,13 +1025,13 @@ pub struct Backend { offchain_storage: offchain::LocalStorage, blockchain: BlockchainDb, canonicalization_delay: u64, - shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, - keep_blocks: KeepBlocks, + blocks_pruning: BlocksPruning, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, + shared_trie_cache: Option>>, } impl Backend { @@ -1043,21 +1059,20 @@ impl Backend { /// Create new memory-backed client backend for tests. 
#[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(keep_blocks, canonicalization_delay) + pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(keep_blocks: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(keep_blocks)), + trie_cache_maximum_size: Some(16 * 1024 * 1024), + state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), source: DatabaseSource::Custom { db, require_create_flag: true }, - keep_blocks: KeepBlocks::Some(keep_blocks), + blocks_pruning: BlocksPruning::Some(blocks_pruning), }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1116,16 +1131,15 @@ impl Backend { offchain_storage, blockchain, canonicalization_delay, - shared_cache: new_shared_cache( - config.state_cache_size, - config.state_cache_child_ratio.unwrap_or(DEFAULT_CHILD_RATIO), - ), import_lock: Default::default(), is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), + shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { + SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size)) + }), }; // Older DB versions have no last state key. Check if the state is available and set it. @@ -1194,7 +1208,7 @@ impl Backend { (&r.number, &r.hash) ); - return Err(::sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain) } retracted.push(r.hash); @@ -1251,7 +1265,7 @@ impl Backend { header: &Block::Header, last_finalized: Option, justification: Option, - finalization_displaced: &mut Option>>, + finalization_displaced: &mut Option>>, ) -> ClientResult> { // TODO: ensure best chain contains this block. let number = *header.number(); @@ -1345,16 +1359,21 @@ impl Backend { let parent_hash = *pending_block.header.parent_hash(); let number = *pending_block.header.number(); + let highest_leaf = self + .blockchain + .leaves + .read() + .highest_leaf() + .map(|(n, _)| n) + .unwrap_or(Zero::zero()); let existing_header = - number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); + number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; - let (enacted, retracted) = if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? 
- } else { - (Default::default(), Default::default()) + if pending_block.leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; }; utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; @@ -1481,14 +1500,22 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); - debug!(target: "db", + debug!( + target: "db", "DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}", - hash, number, is_best, operation.commit_state, existing_header, finalized, + hash, + number, + is_best, + operation.commit_state, + existing_header, + finalized, ); self.state_usage.merge_sm(operation.old_state.usage_info()); + // release state reference so that it can be finalized - let cache = operation.old_state.into_cache_changes(); + // VERY IMPORTANT + drop(operation.old_state); if finalized { // TODO: ensure best chain contains this block. @@ -1577,20 +1604,20 @@ impl Backend { is_finalized: finalized, with_state: operation.commit_state, }); - Some((pending_block.header, number, hash, enacted, retracted, is_best, cache)) + Some((pending_block.header, hash)) } else { None }; - let cache_update = if let Some(set_head) = operation.set_head { + if let Some(set_head) = operation.set_head { if let Some(header) = sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { let number = header.number(); let hash = header.hash(); - let (enacted, retracted) = - self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; + self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; + meta_updates.push(MetaUpdate { hash, number: *number, @@ -1598,40 +1625,24 @@ impl Backend { is_finalized: false, with_state: false, }); - Some((enacted, retracted)) } else { return Err(sp_blockchain::Error::UnknownBlock(format!( "Cannot set head {:?}", set_head ))) } - } else { - None - }; + } self.storage.db.commit(transaction)?; // Apply all in-memory state changes. // Code beyond this point can't fail. 
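 		// The database transaction was committed right above, so everything from here on is
 		// in-memory bookkeeping (header metadata, header cache, meta updates) that must be
 		// applied unconditionally to stay consistent with what is already on disk.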
- if let Some((header, number, hash, enacted, retracted, is_best, mut cache)) = imported { + if let Some((header, hash)) = imported { trace!(target: "db", "DB Commit done {:?}", hash); let header_metadata = CachedHeaderMetadata::from(&header); self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); - cache.sync_cache( - &enacted, - &retracted, - operation.storage_updates, - operation.child_storage_updates, - Some(hash), - Some(number), - is_best, - ); - } - - if let Some((enacted, retracted)) = cache_update { - self.shared_cache.write().sync(&enacted, &retracted); } for m in meta_updates { @@ -1650,7 +1661,7 @@ impl Backend { transaction: &mut Transaction, f_header: &Block::Header, f_hash: Block::Hash, - displaced: &mut Option>>, + displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { let f_num = *f_header.number(); @@ -1689,11 +1700,11 @@ impl Backend { &self, transaction: &mut Transaction, finalized: NumberFor, - displaced: &FinalizationDisplaced>, + displaced: &FinalizationOutcome>, ) -> ClientResult<()> { - if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { // Always keep the last finalized block - let keep = std::cmp::max(keep_blocks, 1); + let keep = std::cmp::max(blocks_pruning, 1); if finalized >= keep.into() { let number = finalized.saturating_sub(keep.into()); self.prune_block(transaction, BlockId::::number(number))?; @@ -1763,17 +1774,13 @@ impl Backend { Ok(()) } - fn empty_state(&self) -> ClientResult, Block>> { + fn empty_state(&self) -> ClientResult, Block>> { let root = EmptyStorage::::new().0; // Empty trie - let db_state = DbState::::new(self.storage.clone(), root); + let db_state = DbStateBuilder::::new(self.storage.clone(), root) + .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) + .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new(state, self.shared_cache.clone(), None); - Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )) + Ok(RecordStatsState::new(state, None, self.state_usage.clone())) } } @@ -1895,16 +1902,13 @@ where impl sc_client_api::backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; - type State = SyncingCachingState, Block>; + type State = RecordStatsState, Block>; type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { - let mut old_state = self.empty_state()?; - old_state.disable_syncing(); - Ok(BlockImportOperation { pending_block: None, - old_state, + old_state: self.empty_state()?, db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), @@ -1927,7 +1931,6 @@ impl sc_client_api::backend::Backend for Backend { } else { operation.old_state = self.state_at(block)?; } - operation.old_state.disable_syncing(); operation.commit_state = true; Ok(()) @@ -2028,8 +2031,9 @@ impl sc_client_api::backend::Backend for Backend { ) }); let database_cache = MemorySize::from_bytes(0); - let state_cache = - MemorySize::from_bytes(self.shared_cache.read().used_storage_cache_size()); + let state_cache = MemorySize::from_bytes( + self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()), + ); let state_db = 
self.storage.state_db.memory_info(); Some(UsageInfo { @@ -2057,36 +2061,46 @@ impl sc_client_api::backend::Backend for Backend { ) -> ClientResult<(NumberFor, HashSet)> { let mut reverted_finalized = HashSet::new(); - let mut best_number = self.blockchain.info().best_number; - let mut best_hash = self.blockchain.info().best_hash; + let info = self.blockchain.info(); + + let highest_leaf = self + .blockchain + .leaves + .read() + .highest_leaf() + .and_then(|(n, h)| h.last().map(|h| (n, *h))); + + let best_number = info.best_number; + let best_hash = info.best_hash; - let finalized = self.blockchain.info().finalized_number; + let finalized = info.finalized_number; let revertible = best_number - finalized; let n = if !revert_finalized && revertible < n { revertible } else { n }; + let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf { + Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h), + None => (n, best_number, best_hash), + }; + let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { - if best_number.is_zero() { + if number_to_revert.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed = - self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { + self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block hash not found.", - best_number + "Error reverting to {}. Block header not found.", + hash_to_revert, )) })?; let removed_hash = removed.hash(); - let prev_number = best_number.saturating_sub(One::one()); - let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block hash not found.", - best_number - )) - })?; + let prev_number = number_to_revert.saturating_sub(One::one()); + let prev_hash = + if prev_number == best_number { best_hash } else { *removed.parent_hash() }; if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) @@ -2096,12 +2110,15 @@ impl sc_client_api::backend::Backend for Backend { Some(commit) => { apply_state_commit(&mut transaction, commit); - best_number = prev_number; - best_hash = prev_hash; + number_to_revert = prev_number; + hash_to_revert = prev_hash; - let update_finalized = best_number < finalized; + let update_finalized = number_to_revert < finalized; - let key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; + let key = utils::number_and_hash_to_lookup_key( + number_to_revert, + &hash_to_revert, + )?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2111,12 +2128,14 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == best_hash { - if !best_number.is_zero() && - self.have_state_at(&prev_hash, best_number - One::one()) - { + if hash == hash_to_revert { + if !number_to_revert.is_zero() && + self.have_state_at( + &prev_hash, + number_to_revert - One::one(), + ) { let lookup_key = utils::number_and_hash_to_lookup_key( - best_number - One::one(), + number_to_revert - One::one(), prev_hash, )?; transaction.set_from_vec( @@ -2137,13 +2156,16 @@ impl sc_client_api::backend::Backend for Backend { &mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, - best_hash, + hash_to_revert, ); self.storage.db.commit(transaction)?; + + let is_best = number_to_revert < best_number; + 
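+					// `hash_to_revert` may be a non-best leaf sitting above the current best
+					// block; `is_best` guards the meta update below so that such a block is
+					// never recorded as the new best.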
self.blockchain.update_meta(MetaUpdate { - hash: best_hash, - number: best_number, - is_best: true, + hash: hash_to_revert, + number: number_to_revert, + is_best, is_finalized: update_finalized, with_state: false, }); @@ -2161,7 +2183,7 @@ impl sc_client_api::backend::Backend for Backend { let mut transaction = Transaction::new(); let mut leaves = self.blockchain.leaves.write(); - leaves.revert(best_hash, best_number); + leaves.revert(hash_to_revert, number_to_revert); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -2201,9 +2223,40 @@ impl sc_client_api::backend::Backend for Backend { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - leaves.revert(*hash, hdr.number); + + let children: Vec<_> = self + .blockchain() + .children(hdr.parent)? + .into_iter() + .filter(|child_hash| child_hash != hash) + .collect(); + let parent_leaf = if children.is_empty() { + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hdr.parent, + ); + Some(hdr.parent) + } else { + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hdr.parent, + children, + ); + None + }; + + let remove_outcome = leaves.remove(*hash, hdr.number, parent_leaf); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.commit(transaction)?; + if let Err(e) = self.storage.db.commit(transaction) { + if let Some(outcome) = remove_outcome { + leaves.undo().undo_remove(outcome); + } + return Err(e.into()) + } self.blockchain().remove_header_metadata(*hash); Ok(()) } @@ -2222,17 +2275,13 @@ impl sc_client_api::backend::Backend for Backend { }; if is_genesis { if let Some(genesis_state) = &*self.genesis_state.read() { - let db_state = DbState::::new(genesis_state.clone(), genesis_state.root); + let root = genesis_state.root; + let db_state = DbStateBuilder::::new(genesis_state.clone(), root) + .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) + .build(); + let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new(state, self.shared_cache.clone(), None); - let mut state = SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - ); - state.disable_syncing(); - return Ok(state) + return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) } } @@ -2253,16 +2302,13 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; - let db_state = DbState::::new(self.storage.clone(), root); + let db_state = DbStateBuilder::::new(self.storage.clone(), root) + .with_optional_cache( + self.shared_trie_cache.as_ref().map(|c| c.local_cache()), + ) + .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - let caching_state = - CachingState::new(state, self.shared_cache.clone(), Some(hash)); - Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )) + Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2292,7 +2338,7 @@ impl sc_client_api::backend::Backend for Backend { } fn get_import_lock(&self) -> &RwLock<()> { - 
&*self.import_lock + &self.import_lock } fn requires_full_sync(&self) -> bool { @@ -2438,11 +2484,10 @@ pub(crate) mod tests { let backend = Backend::::new( DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(1)), + trie_cache_maximum_size: Some(16 * 1024 * 1024), + state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, }, 0, ) @@ -3351,7 +3396,21 @@ pub(crate) mod tests { prev_hash = hash; } - // insert a fork at block 2, which becomes best block + for i in 0..2 { + let hash = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + } + + // insert a fork at block 1, which becomes best block let best_hash = insert_block( &backend, 1, @@ -3362,11 +3421,36 @@ pub(crate) mod tests { None, ) .unwrap(); + + assert_eq!(backend.blockchain().info().best_hash, best_hash); assert!(backend.remove_leaf_block(&best_hash).is_err()); - assert!(backend.have_state_at(&prev_hash, 1)); - backend.remove_leaf_block(&prev_hash).unwrap(); - assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash.clone())).unwrap()); - assert!(!backend.have_state_at(&prev_hash, 1)); + + assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); + + assert!(backend.have_state_at(&blocks[3], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[3]).unwrap(); + assert!(!backend.have_state_at(&blocks[3], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); + + assert!(backend.have_state_at(&blocks[2], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[2]).unwrap(); + assert!(!backend.have_state_at(&blocks[2], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); + + assert!(backend.have_state_at(&blocks[1], 1)); + assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[1]).unwrap(); + assert!(!backend.have_state_at(&blocks[1], 1)); + assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); + assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); } #[test] @@ -3463,4 +3547,112 @@ pub(crate) mod tests { insert_header_no_head(&backend, 1, block0, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } + + #[test] + fn revert_non_best_blocks() { + let backend = Backend::::new_test(10, 10); + + let genesis = + insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) + .unwrap(); + + let block1 = + insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); + + let block2 = + 
insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap(); + + let block3 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + let header = Header { + number: 3, + parent_hash: block2, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block4 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + let header = Header { + number: 4, + parent_hash: block3, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block3_fork = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + let header = Header { + number: 3, + parent_hash: block2, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: H256::from_low_u64_le(42), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + assert!(backend.have_state_at(&block1, 1)); + assert!(backend.have_state_at(&block2, 2)); + assert!(backend.have_state_at(&block3, 3)); + assert!(backend.have_state_at(&block4, 4)); + assert!(backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); + assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + + assert_eq!(3, backend.revert(1, false).unwrap().0); + + assert!(backend.have_state_at(&block1, 1)); + assert!(!backend.have_state_at(&block2, 2)); + assert!(!backend.have_state_at(&block3, 3)); + assert!(!backend.have_state_at(&block4, 4)); + assert!(!backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); + assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + } + + #[test] + fn test_no_duplicated_leaves_allowed() { + let backend: Backend = Backend::new_test(10, 10); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + // Add block 2 not as the best block + let block2 = insert_header_no_head(&backend, 2, block1, Default::default()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); + assert_eq!(backend.blockchain().info().best_hash, block1); + + // Add block 2 as the best block + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); + assert_eq!(backend.blockchain().info().best_hash, block2); + } } diff --git a/client/db/src/record_stats_state.rs b/client/db/src/record_stats_state.rs new file mode 100644 index 0000000000000..0b51d3fef2127 --- /dev/null +++ b/client/db/src/record_stats_state.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Provides [`RecordStatsState`] for recording stats about state access. + +use crate::stats::StateUsageStats; +use sp_core::storage::ChildInfo; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + StateVersion, +}; +use sp_state_machine::{ + backend::{AsTrieBackend, Backend as StateBackend}, + TrieBackend, +}; +use std::sync::Arc; + +/// State abstraction for recording stats about state access. +pub struct RecordStatsState { + /// Usage statistics + usage: StateUsageStats, + /// State machine registered stats + overlay_stats: sp_state_machine::StateMachineStats, + /// Backing state. + state: S, + /// The hash of the block is state belongs to. + block_hash: Option, + /// The usage statistics of the backend. These will be updated on drop. + state_usage: Arc, +} + +impl std::fmt::Debug for RecordStatsState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Block {:?}", self.block_hash) + } +} + +impl Drop for RecordStatsState { + fn drop(&mut self) { + self.state_usage.merge_sm(self.usage.take()); + } +} + +impl>, B: BlockT> RecordStatsState { + /// Create a new instance wrapping generic State and shared cache. 
+ pub(crate) fn new( + state: S, + block_hash: Option, + state_usage: Arc, + ) -> Self { + RecordStatsState { + usage: StateUsageStats::new(), + overlay_stats: sp_state_machine::StateMachineStats::default(), + state, + block_hash, + state_usage, + } + } +} + +impl>, B: BlockT> StateBackend> for RecordStatsState { + type Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + let value = self.state.storage(key)?; + self.usage.tally_key_read(key, value.as_ref(), false); + Ok(value) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.state.storage_hash(key) + } + + fn child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + let key = (child_info.storage_key().to_vec(), key.to_vec()); + let value = self.state.child_storage(child_info, &key.1)?; + + // just pass it through the usage counter + let value = self.usage.tally_child_key_read(&key, value, false); + + Ok(value) + } + + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.state.child_storage_hash(child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.state.exists_storage(key) + } + + fn exists_child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result { + self.state.exists_child_storage(child_info, key) + } + + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + ) { + self.state.apply_to_keys_while(child_info, prefix, start_at, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.next_child_storage_key(child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + child_info: &ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state.for_child_keys_with_prefix(child_info, prefix, f) + } + + fn storage_root<'a>( + &self, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { + self.state.storage_root(delta, state_version) + } + + fn child_storage_root<'a>( + &self, + child_info: &ChildInfo, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { + self.state.child_storage_root(child_info, delta, state_version) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state.pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.state.keys(prefix) + } + + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { + self.state.child_keys(child_info, prefix) + } + + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { + self.overlay_stats.add(stats); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + let mut info = 
self.usage.take(); + info.include_state_machine_states(&self.overlay_stats); + info + } +} + +impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> + for RecordStatsState +{ + type TrieBackendStorage = >>::TrieBackendStorage; + + fn as_trie_backend(&self) -> &TrieBackend> { + self.state.as_trie_backend() + } +} diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 8326946999946..d9253fe09eb50 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1223,7 +1223,7 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent.clone()), + Some(root_parent), ); let key = H256::random()[..].to_vec(); @@ -1307,14 +1307,14 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent.clone()), + Some(root_parent), ); s.cache.sync_cache( &[], &[], vec![(key.clone(), Some(vec![2]))], vec![], - Some(h0.clone()), + Some(h0), Some(0), true, ); diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 566ed0a50fc0f..6a4b2a44a2d44 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" tracing = "0.1.29" wasmi = "0.9.1" diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 9ffdfb788474d..b16ed7e8552ce 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -23,7 +23,6 @@ wasmi = "0.9.1" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } -sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" } sp-wasm-interface = { version = "6.0.0", path = "../../../primitives/wasm-interface" } [features] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 09e070bb3bae5..376ac190bd7b7 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -18,7 +18,6 @@ //! Rust executor possible errors. -use sp_serializer; use wasmi; /// Result type alias. @@ -28,9 +27,6 @@ pub type Result = std::result::Result; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { - #[error("Unserializable data encountered")] - InvalidData(#[from] sp_serializer::Error), - #[error(transparent)] Wasmi(#[from] wasmi::Error), diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index d43ad758b144c..d0cc8926144be 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -21,6 +21,8 @@ use crate::error::Error; use sp_wasm_interface::Value; +pub use sc_allocator::AllocationStats; + /// A method to be used to find the entrypoint when calling into the runtime /// /// Contains variants on how to resolve wasm function that will be invoked. @@ -78,7 +80,20 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error>; + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + self.call_with_allocation_stats(method, data).0 + } + + /// Call a method on this WASM instance. 
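+	/// In addition to the result of the call, this variant also returns the
+	/// [`AllocationStats`] gathered during the call, when the instance collects them.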
+ /// + /// Before execution, instance is reset. + /// + /// Returns the encoded result on success. + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result, Error>, Option); /// Call an exported method on this WASM instance. /// diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 0c61d6fcd38a2..5a741f51c3899 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -54,287 +54,287 @@ static mut MUTABLE_STATIC: u64 = 32; static mut MUTABLE_STATIC_BSS: u64 = 0; sp_core::wasm_export_functions! { - fn test_calling_missing_external() { - unsafe { missing_external() } - } + fn test_calling_missing_external() { + unsafe { missing_external() } + } - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } + fn test_calling_yet_another_missing_external() { + unsafe { yet_another_missing_external() } + } - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); + fn test_data_in(input: Vec) -> Vec { + print("set_storage"); + storage::set(b"input", &input); - print("storage"); - let foo = storage::get(b"foo").unwrap(); + print("storage"); + let foo = storage::get(b"foo").unwrap(); - print("set_storage"); - storage::set(b"baz", &foo); + print("set_storage"); + storage::set(b"baz", &foo); - print("finished!"); - b"all ok!".to_vec() - } + print("finished!"); + b"all ok!".to_vec() + } - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input, None); - b"all ok!".to_vec() - } + fn test_clear_prefix(input: Vec) -> Vec { + storage::clear_prefix(&input, None); + b"all ok!".to_vec() + } - fn test_empty_return() {} + fn test_empty_return() {} - fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { - // This piece of code will dirty multiple pages of memory. The number of pages is given by - // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared - // is a wasm page that that follows the one that holds the `heap_base` address. - // - // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take - // 16 writes to process a single wasm page. + fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { + // This piece of code will dirty multiple pages of memory. The number of pages is given by + // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared + // is a wasm page that that follows the one that holds the `heap_base` address. + // + // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take + // 16 writes to process a single wasm page. - let heap_ptr = heap_base as usize; + let heap_ptr = heap_base as usize; - // Find the next wasm page boundary. - let heap_ptr = round_up_to(heap_ptr, 65536); + // Find the next wasm page boundary. + let heap_ptr = round_up_to(heap_ptr, 65536); - // Make it an actual pointer - let heap_ptr = heap_ptr as *mut u8; + // Make it an actual pointer + let heap_ptr = heap_ptr as *mut u8; - // Traverse the host pages and make each one dirty - let host_pages = heap_pages as usize * 16; - for i in 0..host_pages { - unsafe { - // technically this is an UB, but there is no way Rust can find this out. 
- heap_ptr.add(i * 4096).write(0); - } - } + // Traverse the host pages and make each one dirty + let host_pages = heap_pages as usize * 16; + for i in 0..host_pages { + unsafe { + // technically this is an UB, but there is no way Rust can find this out. + heap_ptr.add(i * 4096).write(0); + } + } - fn round_up_to(n: usize, divisor: usize) -> usize { - (n + divisor - 1) / divisor - } - } + fn round_up_to(n: usize, divisor: usize) -> usize { + (n + divisor - 1) / divisor + } + } fn test_allocate_vec(size: u32) -> Vec { Vec::with_capacity(size as usize) } - fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { - let a = f32::from_le_bytes(a); - let b = f32::from_le_bytes(b); - f32::to_le_bytes(a + b) - } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec { - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - sp_core::storage::StateVersion::V1, - ).as_ref().to_vec() - } - - fn test_offchain_index_set() { - sp_io::offchain_index::set(b"k", b"v"); - } - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } - - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - 
assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; - - run().is_some() - } - - fn test_enter_span() -> u64 { - wasm_tracing::enter_span(Default::default()) - } - - fn test_exit_span(span_id: u64) { - wasm_tracing::exit(span_id) - } - - fn test_nested_spans() { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - wasm_tracing::exit(span_id); - } - wasm_tracing::exit(span_id); - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn returns_mutable_static_bss() -> u64 { - unsafe { - MUTABLE_STATIC_BSS += 1; - MUTABLE_STATIC_BSS - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. - // - // It is expected that the given pointer is not allocated. 
- fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = (heap_base + offset) as *mut u8; - - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; - - assert_ne!(test_message, message_slice); - message_slice.copy_from_slice(test_message); - } - - fn test_spawn() { - let data = vec![1u8, 2u8]; - let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); - - assert_eq!(data_new, vec![2u8, 3u8]); - } - - fn test_nested_spawn() { - let data = vec![7u8, 13u8]; - let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); - - assert_eq!(data_new, vec![10u8, 16u8]); - } - - fn test_panic_in_spawned() { - sp_tasks::spawn(tasks::panicker, vec![]).join(); - } + fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { + let a = f32::from_le_bytes(a); + let b = f32::from_le_bytes(b); + f32::to_le_bytes(a + b) + } + + fn test_panic() { panic!("test panic") } + + fn test_conditional_panic(input: Vec) -> Vec { + if input.len() > 0 { + panic!("test panic") + } + + input + } + + fn test_blake2_256(input: Vec) -> Vec { + blake2_256(&input).to_vec() + } + + fn test_blake2_128(input: Vec) -> Vec { + blake2_128(&input).to_vec() + } + + fn test_sha2_256(input: Vec) -> Vec { + sha2_256(&input).to_vec() + } + + fn test_twox_256(input: Vec) -> Vec { + twox_256(&input).to_vec() + } + + fn test_twox_128(input: Vec) -> Vec { + twox_128(&input).to_vec() + } + + fn test_ed25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + } + + fn test_sr25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + } + + fn test_ordered_trie_root() -> Vec { + BlakeTwo256::ordered_trie_root( + vec![ + b"zero"[..].into(), + b"one"[..].into(), + b"two"[..].into(), + ], + sp_core::storage::StateVersion::V1, + ).as_ref().to_vec() + } + + fn test_offchain_index_set() { + sp_io::offchain_index::set(b"k", b"v"); + } + + fn test_offchain_local_storage() -> bool { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + sp_io::offchain::local_storage_set(kind, b"test", b"asd"); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); + + let res = sp_io::offchain::local_storage_compare_and_set( + kind, + b"test", + Some(b"asd".to_vec()), + b"", + ); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); + res + } + + fn test_offchain_local_storage_with_none() { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + + let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); + assert_eq!(res, true); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); + } + + fn test_offchain_http() -> bool { + use sp_core::offchain::HttpRequestStatus; + let run = || -> Option<()> { + let id = sp_io::offchain::http_request_start( + "POST", + "http://localhost:12345", + &[], + ).ok()?; + sp_io::offchain::http_request_add_header(id, "X-Auth", 
"test").ok()?; + sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; + sp_io::offchain::http_request_write_body(id, &[], None).ok()?; + let status = sp_io::offchain::http_response_wait(&[id], None); + assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); + let headers = sp_io::offchain::http_response_headers(id); + assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); + let mut buffer = vec![0; 64]; + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 3); + assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 0); + + Some(()) + }; + + run().is_some() + } + + fn test_enter_span() -> u64 { + wasm_tracing::enter_span(Default::default()) + } + + fn test_exit_span(span_id: u64) { + wasm_tracing::exit(span_id) + } + + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + + fn returns_mutable_static() -> u64 { + unsafe { + MUTABLE_STATIC += 1; + MUTABLE_STATIC + } + } + + fn returns_mutable_static_bss() -> u64 { + unsafe { + MUTABLE_STATIC_BSS += 1; + MUTABLE_STATIC_BSS + } + } + + fn allocates_huge_stack_array(trap: bool) -> Vec { + // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). + // This will just decrease (stacks in wasm32-u-u grow downwards) the stack + // pointer. This won't trap on the current compilers. + let mut data = [0u8; 1024 * 768]; + + // Then make sure we actually write something to it. + // + // If: + // 1. the stack area is placed at the beginning of the linear memory space, and + // 2. the stack pointer points to out-of-bounds area, and + // 3. a write is performed around the current stack pointer. + // + // then a trap should happen. + // + for (i, v) in data.iter_mut().enumerate() { + *v = i as u8; // deliberate truncation + } + + if trap { + // There is a small chance of this to be pulled up in theory. In practice + // the probability of that is rather low. + panic!() + } + + data.to_vec() + } + + // Check that the heap at `heap_base + offset` don't contains the test message. + // After the check succeeds the test message is written into the heap. + // + // It is expected that the given pointer is not allocated. + fn check_and_set_in_heap(heap_base: u32, offset: u32) { + let test_message = b"Hello invalid heap memory"; + let ptr = (heap_base + offset) as *mut u8; + + let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + + assert_ne!(test_message, message_slice); + message_slice.copy_from_slice(test_message); + } + + fn test_spawn() { + let data = vec![1u8, 2u8]; + let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); + + assert_eq!(data_new, vec![2u8, 3u8]); + } + + fn test_nested_spawn() { + let data = vec![7u8, 13u8]; + let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); + + assert_eq!(data_new, vec![10u8, 16u8]); + } + + fn test_panic_in_spawned() { + sp_tasks::spawn(tasks::panicker, vec![]).join(); + } fn test_return_i8() -> i8 { -66 @@ -351,6 +351,11 @@ sp_core::wasm_export_functions! 
{ fn test_unreachable_intrinsic() { core::arch::wasm32::unreachable() } + + fn test_return_value() -> u64 { + // Mainly a test that the macro is working when we have a return statement here. + return 1234; + } } #[cfg(not(feature = "std"))] diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 8ce0b56da2389..ba0c93630cf6c 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -918,3 +918,14 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { error => panic!("unexpected error: {:?}", error), } } + +test_wasm_execution!(return_value); +fn return_value(wasm_method: WasmExecutionMethod) { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + assert_eq!( + call_in_wasm("test_return_value", &[], wasm_method, &mut ext).unwrap(), + (1234u64).encode() + ); +} diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index ea060a89c13df..7610c4c8f32e0 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -37,7 +37,7 @@ use std::{ use codec::{Decode, Encode}; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, + wasm_runtime::{AllocationStats, InvokeMethod, WasmInstance, WasmModule}, }; use sp_core::{ traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, @@ -101,7 +101,8 @@ pub struct WasmExecutor { /// The path to a directory which the executor can leverage for a file cache, e.g. put there /// compiled artifacts. cache_path: Option, - + /// Ignore missing function imports. + allow_missing_host_functions: bool, phantom: PhantomData, } @@ -112,6 +113,7 @@ impl Clone for WasmExecutor { default_heap_pages: self.default_heap_pages, cache: self.cache.clone(), cache_path: self.cache_path.clone(), + allow_missing_host_functions: self.allow_missing_host_functions, phantom: self.phantom, } } @@ -130,14 +132,13 @@ where /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. /// - /// `host_functions` - The set of host functions to be available for import provided by this - /// executor. - /// /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. /// /// `cache_path` - A path to a directory where the executor can place its files for purposes of /// caching. This may be important in cases when there are many different modules with the /// compiled execution method is used. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, @@ -154,10 +155,16 @@ where runtime_cache_size, )), cache_path, + allow_missing_host_functions: false, phantom: PhantomData, } } + /// Ignore missing function imports if set true. + pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { + self.allow_missing_host_functions = allow_missing_host_functions + } + /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). /// /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code @@ -171,11 +178,10 @@ where /// runtime is invalidated on any `panic!` to prevent a poisoned state. `ext` is already /// implicitly handled as unwind safe, as we store it in a global variable while executing the /// native runtime. 
-	fn with_instance<R, F>(
+	pub fn with_instance<R, F>(
		&self,
		runtime_code: &RuntimeCode,
		ext: &mut dyn Externalities,
-		allow_missing_host_functions: bool,
		f: F,
	) -> Result<R>
	where
@@ -191,7 +197,7 @@ where
			ext,
			self.method,
			self.default_heap_pages,
-			allow_missing_host_functions,
+			self.allow_missing_host_functions,
			|module, instance, version, ext| {
				let module = AssertUnwindSafe(module);
				let instance = AssertUnwindSafe(instance);
@@ -209,7 +215,7 @@ where
	/// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the
	/// parameters this `WasmExecutor` was initialized with.
	///
-	/// In case of problems with during creation of the runtime or instantation, a `Err` is
-	/// returned. that describes the message.
+	/// In case of problems during creation or instantiation of the runtime, an `Err`
+	/// describing the problem is returned.
	#[doc(hidden)] // We use this function for tests across multiple crates.
	pub fn uncached_call(
@@ -219,6 +225,47 @@ where
		allow_missing_host_functions: bool,
		export_name: &str,
		call_data: &[u8],
+	) -> std::result::Result<Vec<u8>, Error> {
+		self.uncached_call_impl(
+			runtime_blob,
+			ext,
+			allow_missing_host_functions,
+			export_name,
+			call_data,
+			&mut None,
+		)
+	}
+
+	/// Same as `uncached_call`, except it also returns allocation statistics.
+	#[doc(hidden)] // We use this function in tests.
+	pub fn uncached_call_with_allocation_stats(
+		&self,
+		runtime_blob: RuntimeBlob,
+		ext: &mut dyn Externalities,
+		allow_missing_host_functions: bool,
+		export_name: &str,
+		call_data: &[u8],
+	) -> (std::result::Result<Vec<u8>, Error>, Option<AllocationStats>) {
+		let mut allocation_stats = None;
+		let result = self.uncached_call_impl(
+			runtime_blob,
+			ext,
+			allow_missing_host_functions,
+			export_name,
+			call_data,
+			&mut allocation_stats,
+		);
+		(result, allocation_stats)
+	}
+
+	fn uncached_call_impl(
+		&self,
+		runtime_blob: RuntimeBlob,
+		ext: &mut dyn Externalities,
+		allow_missing_host_functions: bool,
+		export_name: &str,
+		call_data: &[u8],
+		allocation_stats_out: &mut Option<AllocationStats>,
	) -> std::result::Result<Vec<u8>, Error> {
		let module = crate::wasm_runtime::create_wasm_runtime_with_code::<H>(
			self.method,
@@ -235,10 +282,14 @@ where
		let mut instance = AssertUnwindSafe(instance);
		let mut ext = AssertUnwindSafe(ext);
		let module = AssertUnwindSafe(module);
+		let mut allocation_stats_out = AssertUnwindSafe(allocation_stats_out);

		with_externalities_safe(&mut **ext, move || {
			preregister_builtin_ext(module.clone());
-			instance.call_export(export_name, call_data)
+			let (result, allocation_stats) =
+				instance.call_with_allocation_stats(export_name.into(), call_data);
+			**allocation_stats_out = allocation_stats;
+			result
		})
		.and_then(|r| r)
	}
@@ -309,7 +360,6 @@ where
		let result = self.with_instance(
			runtime_code,
			ext,
-			false,
			|module, mut instance, _onchain_version, mut ext| {
				with_externalities_safe(&mut **ext, move || {
					preregister_builtin_ext(module.clone());
@@ -330,7 +380,7 @@ where
		ext: &mut dyn Externalities,
		runtime_code: &RuntimeCode,
	) -> Result<RuntimeVersion> {
-		self.with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| {
+		self.with_instance(runtime_code, ext, |_module, _instance, version, _ext| {
			Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into())))
		})
	}
@@ -343,7 +393,7 @@ where
	D: NativeExecutionDispatch,
{
	/// Dummy field to avoid the compiler complaining about us not using `D`.
-	_dummy: std::marker::PhantomData<D>,
+	_dummy: PhantomData<D>,
	/// Native runtime version info.
	native_version: NativeVersion,
	/// Fallback wasm executor.
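// An illustrative usage sketch (not part of the patch): driving the new statistics
// entry point from a test. `executor`, `runtime_blob` and `ext` stand in for the
// setup the existing `uncached_call` tests already perform; "Core_version" is just
// an example export, and the `log` output is likewise only for illustration.
fn log_allocation_stats<H: sp_wasm_interface::HostFunctions>(
	executor: &WasmExecutor<H>,
	runtime_blob: RuntimeBlob,
	ext: &mut dyn Externalities,
) {
	let (result, stats) = executor.uncached_call_with_allocation_stats(
		runtime_blob,
		ext,
		false, // allow_missing_host_functions
		"Core_version",
		&[],
	);
	// The statistics are returned alongside (not inside) the call result, so they
	// are still available even when the call itself failed.
	log::debug!("call ok: {}, allocation stats: {:?}", result.is_ok(), stats);
}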
@@ -360,13 +410,17 @@ impl NativeElseWasmExecutor { /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// + /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( fallback_method: WasmExecutionMethod, default_heap_pages: Option, max_runtime_instances: usize, runtime_cache_size: u8, ) -> Self { - let wasm_executor = WasmExecutor::new( + let wasm = WasmExecutor::new( fallback_method, default_heap_pages, max_runtime_instances, @@ -377,9 +431,14 @@ impl NativeElseWasmExecutor { NativeElseWasmExecutor { _dummy: Default::default(), native_version: D::native_version(), - wasm: wasm_executor, + wasm, } } + + /// Ignore missing function imports if set true. + pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { + self.wasm.allow_missing_host_functions = allow_missing_host_functions + } } impl RuntimeVersionOf for NativeElseWasmExecutor { @@ -388,10 +447,9 @@ impl RuntimeVersionOf for NativeElseWasmExecutor ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.wasm - .with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) - }) + self.wasm.with_instance(runtime_code, ext, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) } } @@ -558,7 +616,6 @@ impl CodeExecutor for NativeElseWasmExecut let result = self.wasm.with_instance( runtime_code, ext, - false, |module, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 199fe431580ed..e17707e158321 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -29,6 +29,7 @@ use wasmi::{ }; use codec::{Decode, Encode}; +use sc_allocator::AllocationStats; use sc_executor_common::{ error::{Error, MessageWithBacktrace, WasmError}, runtime_blob::{DataSegmentsSnapshot, RuntimeBlob}, @@ -489,6 +490,7 @@ fn call_in_wasm_module( host_functions: Arc>, allow_missing_func_imports: bool, missing_functions: Arc>, + allocation_stats: &mut Option, ) -> Result, Error> { // Initialize FunctionExecutor. let table: Option = module_instance @@ -561,6 +563,8 @@ fn call_in_wasm_module( }, }; + *allocation_stats = Some(function_executor.heap.borrow().stats()); + match result { Ok(Some(I64(r))) => { let (ptr, length) = unpack_ptr_and_len(r as u64); @@ -761,8 +765,13 @@ pub struct WasmiInstance { // `self.instance` unsafe impl Send for WasmiInstance {} -impl WasmInstance for WasmiInstance { - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { +impl WasmiInstance { + fn call_impl( + &mut self, + method: InvokeMethod, + data: &[u8], + allocation_stats: &mut Option, + ) -> Result, Error> { // We reuse a single wasm instance for multiple calls and a previous call (if any) // altered the state. Therefore, we need to restore the instance to original state. 
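// A configuration sketch for the setter introduced above (illustrative, not part of
// the patch; `MyExecutorDispatch` and the numeric parameters are placeholders):
let mut executor = NativeElseWasmExecutor::<MyExecutorDispatch>::new(
	WasmExecutionMethod::Interpreted,
	Some(1024), // default_heap_pages
	8,          // max_runtime_instances
	2,          // runtime_cache_size
);
// Callers that deliberately execute a runtime against a reduced host interface can
// now flip the flag once on the executor instead of threading a boolean through
// every `with_instance` call site:
executor.allow_missing_host_functions(true);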
@@ -790,8 +799,21 @@ impl WasmInstance for WasmiInstance { self.host_functions.clone(), self.allow_missing_func_imports, self.missing_functions.clone(), + allocation_stats, ) } +} + +impl WasmInstance for WasmiInstance { + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result, Error>, Option) { + let mut allocation_stats = None; + let result = self.call_impl(method, data, &mut allocation_stats); + (result, allocation_stats) + } fn get_global_const(&mut self, name: &str) -> Result, Error> { match self.instance.export_by_name(name) { diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 9d40fb53fd559..e75f2d9d043d0 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -18,6 +18,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } libc = "0.2.121" log = "0.4.17" parity-wasm = "0.42.0" + +# When bumping wasmtime do not forget to also bump rustix +# to exactly the same version as used by wasmtime! wasmtime = { version = "0.38.0", default-features = false, features = [ "cache", "cranelift", @@ -32,6 +35,17 @@ sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime- sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } +[target.'cfg(target_os = "linux")'.dependencies] +rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } +once_cell = "1.12.0" + +# Here we include the rustix crate used by wasmtime just to enable its 'use-libc' flag. +# +# By default rustix directly calls the appropriate syscalls completely bypassing libc; +# this doesn't have any actual benefits for us besides making it harder to debug memory +# problems (since then `mmap` etc. cannot be easily hooked into). +rustix_wasmtime = { package = "rustix", version = "0.33.7", default-features = false, features = ["std", "use-libc"] } + [dev-dependencies] wat = "1.0" sc-runtime-test = { version = "2.0.0", path = "../runtime-test" } diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index b494795ae9dc1..768a6e36e2390 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -23,7 +23,7 @@ use log::trace; use wasmtime::{Caller, Func, Val}; use codec::{Decode, Encode}; -use sc_allocator::FreeingBumpHeapAllocator; +use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::Result, sandbox::{self, SupervisorFuncIndex}, @@ -66,6 +66,10 @@ impl HostState { pub fn take_panic_message(&mut self) -> Option { self.panic_message.take() } + + pub(crate) fn allocation_stats(&self) -> AllocationStats { + self.allocator.stats() + } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime @@ -272,12 +276,11 @@ impl<'a> Sandbox for HostContext<'a> { .ok_or("Runtime doesn't have a table; sandbox is unavailable")?; let table_item = table.get(&mut self.caller, dispatch_thunk_id); - table_item + *table_item .ok_or("dispatch_thunk_id is out of bounds")? .funcref() .ok_or("dispatch_thunk_idx should be a funcref")? .ok_or("dispatch_thunk_idx should point to actual func")? 
- .clone() }; let guest_env = match sandbox::GuestEnvironment::decode(self.sandbox_store(), raw_env_def) { diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index fae8d75854ef3..c80952a2541ce 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -112,7 +112,7 @@ impl<'a, 'b> sp_wasm_interface::HostFunctionRegistry for Registry<'a, 'b> { if self.pending_func_imports.remove(fn_name).is_some() { self.linker.func_wrap("env", fn_name, func).map_err(|error| { WasmError::Other(format!( - "failed to register host function '{}' with the WASM linker: {}", + "failed to register host function '{}' with the WASM linker: {:#}", fn_name, error )) })?; diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 6abcbca1bba6f..feded4008068d 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -107,8 +107,7 @@ impl EntryPoint { ) -> std::result::Result { let entrypoint = func .typed::<(u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for direct entry point")? - .clone(); + .map_err(|_| "Invalid signature for direct entry point")?; Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) } @@ -119,8 +118,7 @@ impl EntryPoint { ) -> std::result::Result { let dispatcher = dispatcher .typed::<(u32, u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for wrapped entry point")? - .clone(); + .map_err(|_| "Invalid signature for wrapped entry point")?; Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) } } @@ -186,9 +184,10 @@ impl InstanceWrapper { ) -> Result { let mut store = create_store(engine, max_memory_size); let instance = instance_pre.instantiate(&mut store).map_err(|error| { - WasmError::Other( - format!("failed to instantiate a new WASM module instance: {}", error,), - ) + WasmError::Other(format!( + "failed to instantiate a new WASM module instance: {:#}", + error, + )) })?; let memory = get_linear_memory(&instance, &mut store)?; @@ -213,9 +212,8 @@ impl InstanceWrapper { Error::from(format!("Exported method {} is not found", method)) })?; let func = extern_func(&export) - .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? - .clone(); - EntryPoint::direct(func, &self.store).map_err(|_| { + .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; + EntryPoint::direct(*func, &self.store).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, @@ -230,10 +228,9 @@ impl InstanceWrapper { let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? - .ok_or(Error::FunctionRefIsNull(func_ref))? - .clone(); + .ok_or(Error::FunctionRefIsNull(func_ref))?; - EntryPoint::direct(func, &self.store).map_err(|_| { + EntryPoint::direct(*func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -251,10 +248,9 @@ impl InstanceWrapper { let dispatcher = val .funcref() .ok_or(Error::TableElementIsNotAFunction(dispatcher_ref))? - .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? 
- .clone(); + .ok_or(Error::FunctionRefIsNull(dispatcher_ref))?; - EntryPoint::wrapped(dispatcher, func, &self.store).map_err(|_| { + EntryPoint::wrapped(*dispatcher, func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -314,9 +310,8 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result Result> { +impl WasmtimeInstance { + fn call_impl( + &mut self, + method: InvokeMethod, + data: &[u8], + allocation_stats: &mut Option, + ) -> Result> { match &mut self.strategy { Strategy::LegacyInstanceReuse { ref mut instance_wrapper, @@ -205,7 +210,8 @@ impl WasmInstance for WasmtimeInstance { globals_snapshot.apply(&mut InstanceGlobals { instance: instance_wrapper }); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = perform_call(data, instance_wrapper, entrypoint, allocator); + let result = + perform_call(data, instance_wrapper, entrypoint, allocator, allocation_stats); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. @@ -219,10 +225,22 @@ impl WasmInstance for WasmtimeInstance { let entrypoint = instance_wrapper.resolve_entrypoint(method)?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call(data, &mut instance_wrapper, entrypoint, allocator) + perform_call(data, &mut instance_wrapper, entrypoint, allocator, allocation_stats) }, } } +} + +impl WasmInstance for WasmtimeInstance { + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result>, Option) { + let mut allocation_stats = None; + let result = self.call_impl(method, data, &mut allocation_stats); + (result, allocation_stats) + } fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { @@ -257,12 +275,12 @@ fn setup_wasmtime_caching( let wasmtime_cache_root = cache_path.join("wasmtime"); fs::create_dir_all(&wasmtime_cache_root) - .map_err(|err| format!("cannot create the dirs to cache: {:?}", err))?; + .map_err(|err| format!("cannot create the dirs to cache: {}", err))?; // Canonicalize the path after creating the directories. let wasmtime_cache_root = wasmtime_cache_root .canonicalize() - .map_err(|err| format!("failed to canonicalize the path: {:?}", err))?; + .map_err(|err| format!("failed to canonicalize the path: {}", err))?; // Write the cache config file let cache_config_path = wasmtime_cache_root.join("cache-config.toml"); @@ -275,11 +293,11 @@ directory = \"{cache_dir}\" cache_dir = wasmtime_cache_root.display() ); fs::write(&cache_config_path, config_content) - .map_err(|err| format!("cannot write the cache config: {:?}", err))?; + .map_err(|err| format!("cannot write the cache config: {}", err))?; config .cache_config_load(cache_config_path) - .map_err(|err| format!("failed to parse the config: {:?}", err))?; + .map_err(|err| format!("failed to parse the config: {:#}", err))?; Ok(()) } @@ -304,15 +322,21 @@ fn common_config(semantics: &Semantics) -> std::result::Result native_stack_max, + + // In `wasmtime` 0.35 the default stack size limit was changed from 1MB to 512KB. + // + // This broke at least one parachain which depended on the original 1MB limit, + // so here we restore it to what it was originally. 
+ None => 1024 * 1024, + }; + + config + .max_wasm_stack(native_stack_max as usize) + .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {:#}", e)))?; config.parallel_compilation(semantics.parallel_compilation); @@ -366,7 +390,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result std::result::Result( code_supply_mode: CodeSupplyMode<'_>, - config: Config, + mut config: Config, ) -> std::result::Result where H: HostFunctions, { + replace_strategy_if_broken(&mut config.semantics.instantiation_strategy); + let mut wasmtime_config = common_config(&config.semantics)?; if let Some(ref cache_path) = config.cache_path { if let Err(reason) = setup_wasmtime_caching(cache_path, &mut wasmtime_config) { @@ -614,7 +642,7 @@ where } let engine = Engine::new(&wasmtime_config) - .map_err(|e| WasmError::Other(format!("cannot create the wasmtime engine: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot create the wasmtime engine: {:#}", e)))?; let (module, instantiation_strategy) = match code_supply_mode { CodeSupplyMode::Fresh(blob) => { @@ -622,7 +650,7 @@ where let serialized_blob = blob.clone().serialize(); let module = wasmtime::Module::new(&engine, &serialized_blob) - .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot create module: {:#}", e)))?; match config.semantics.instantiation_strategy { InstantiationStrategy::LegacyInstanceReuse => { @@ -660,7 +688,7 @@ where // // See [`create_runtime_from_artifact`] for more details. let module = wasmtime::Module::deserialize_file(&engine, compiled_artifact_path) - .map_err(|e| WasmError::Other(format!("cannot deserialize module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:#}", e)))?; (module, InternalInstantiationStrategy::Builtin) }, @@ -673,7 +701,7 @@ where crate::instance_wrapper::create_store(module.engine(), config.semantics.max_memory_size); let instance_pre = linker .instantiate_pre(&mut store, &module) - .map_err(|e| WasmError::Other(format!("cannot preinstantiate module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot preinstantiate module: {:#}", e)))?; Ok(WasmtimeRuntime { engine, @@ -719,14 +747,17 @@ pub fn prepare_runtime_artifact( blob: RuntimeBlob, semantics: &Semantics, ) -> std::result::Result, WasmError> { - let blob = prepare_blob_for_compilation(blob, semantics)?; + let mut semantics = semantics.clone(); + replace_strategy_if_broken(&mut semantics.instantiation_strategy); + + let blob = prepare_blob_for_compilation(blob, &semantics)?; - let engine = Engine::new(&common_config(semantics)?) - .map_err(|e| WasmError::Other(format!("cannot create the engine: {}", e)))?; + let engine = Engine::new(&common_config(&semantics)?) 
+		.map_err(|e| WasmError::Other(format!("cannot create the engine: {:#}", e)))?;

	engine
		.precompile_module(&blob.serialize())
-		.map_err(|e| WasmError::Other(format!("cannot precompile module: {}", e)))
+		.map_err(|e| WasmError::Other(format!("cannot precompile module: {:#}", e)))
}

fn perform_call(
@@ -734,6 +765,7 @@ fn perform_call(
	instance_wrapper: &mut InstanceWrapper,
	entrypoint: EntryPoint,
	mut allocator: FreeingBumpHeapAllocator,
+	allocation_stats: &mut Option<AllocationStats>,
) -> Result<Vec<u8>> {
	let (data_ptr, data_len) = inject_input_data(instance_wrapper, &mut allocator, data)?;
@@ -747,7 +779,10 @@ fn perform_call(
		.map(unpack_ptr_and_len);

	// Reset the host state
-	instance_wrapper.store_mut().data_mut().host_state = None;
+	let host_state = instance_wrapper.store_mut().data_mut().host_state.take().expect(
+		"the host state is always set before calling into WASM so it can't be None here; qed",
+	);
+	*allocation_stats = Some(host_state.allocation_stats());

	let (output_ptr, output_len) = ret?;
	let output = extract_output_data(instance_wrapper, output_ptr, output_len)?;
diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs
index e0fd9fbce0c57..9126cb336bde6 100644
--- a/client/executor/wasmtime/src/tests.rs
+++ b/client/executor/wasmtime/src/tests.rs
@@ -122,7 +122,7 @@ impl RuntimeBuilder {
		self
	}

-	fn build<'a>(&'a mut self) -> impl WasmModule + 'a {
+	fn build(&mut self) -> impl WasmModule + '_ {
		let blob = {
			let wasm: Vec<u8>;
@@ -174,6 +174,85 @@ impl RuntimeBuilder {
	}
}

+fn deep_call_stack_wat(depth: usize) -> String {
+	format!(
+		r#"
+		(module
+			(memory $0 32)
+			(export "memory" (memory $0))
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "overflow") call 0)
+
+			(func $overflow (param $0 i32)
+				(block $label$1
+					(br_if $label$1
+						(i32.ge_u
+							(local.get $0)
+							(i32.const {depth})
+						)
+					)
+					(call $overflow
+						(i32.add
+							(local.get $0)
+							(i32.const 1)
+						)
+					)
+				)
+			)
+
+			(func (export "main")
+				(param i32 i32) (result i64)
+				(call $overflow (i32.const 0))
+				(i64.const 0)
+			)
+		)
+		"#
+	)
+}
+
+// These two tests ensure that `wasmtime`'s stack size limit and the amount of
+// stack space used by a single stack frame don't suddenly change without us noticing.
+//
+// If they do (e.g. because we've pulled in a new version of `wasmtime`) we want to know
+// that it did, regardless of how small the change was.
+//
+// If these tests start failing it doesn't necessarily mean that something is broken;
+// what it means is that one (or multiple) of the following has to be done:
+// a) the tests may need to be updated for the new call depth,
+// b) the stack limit may need to be changed to maintain backwards compatibility,
+// c) the root cause of the new call depth limit has to be determined, and potentially fixed,
+// d) the new call depth limit may need to be validated to ensure it doesn't prevent any
+//    existing chain from syncing (if it was effectively decreased)
+
+// We need two limits here since depending on whether the code is compiled in debug
+// or in release mode the maximum call depth is slightly different.
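// A rough cross-check of the numbers below (a sketch, not part of the patch): with
// the 1 MiB native stack restored in `common_config`, the upper limit implies that
// each recursive wasm frame costs about 1_048_576 / 65_514 ≈ 16 bytes of native
// stack, e.g.:
//
//     const APPROX_FRAME_COST: usize = (1024 * 1024) / CALL_DEPTH_UPPER_LIMIT; // == 16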
+const CALL_DEPTH_LOWER_LIMIT: usize = 65478; +const CALL_DEPTH_UPPER_LIMIT: usize = 65514; + +test_wasm_execution!(test_consume_under_1mb_of_stack_does_not_trap); +fn test_consume_under_1mb_of_stack_does_not_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_LOWER_LIMIT); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + instance.call_export("main", &[]).unwrap(); +} + +test_wasm_execution!(test_consume_over_1mb_of_stack_does_trap); +fn test_consume_over_1mb_of_stack_does_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_UPPER_LIMIT + 1); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + match instance.call_export("main", &[]).unwrap_err() { + Error::AbortedDueToTrap(error) => { + let expected = "wasm trap: call stack exhausted"; + assert_eq!(error.message, expected); + }, + error => panic!("unexpected error: {:?}", error), + } +} + test_wasm_execution!(test_nan_canonicalization); fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { let mut builder = RuntimeBuilder::new(instantiation_strategy).canonicalize_nans(true); diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 60ca598aee181..83745e21e86af 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::runtime::StoreData; +use crate::{runtime::StoreData, InstantiationStrategy}; use sc_executor_common::{ error::{Error, Result}, util::checked_range, @@ -98,3 +98,94 @@ pub(crate) fn write_memory_from( memory[range].copy_from_slice(data); Ok(()) } + +/// Checks whether the `madvise(MADV_DONTNEED)` works as expected. +/// +/// In certain environments (e.g. when running under the QEMU user-mode emulator) +/// this syscall is broken. +#[cfg(target_os = "linux")] +fn is_madvise_working() -> std::result::Result { + let page_size = rustix::param::page_size(); + + unsafe { + // Allocate two memory pages. + let pointer = rustix::mm::mmap_anonymous( + std::ptr::null_mut(), + 2 * page_size, + rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE, + rustix::mm::MapFlags::PRIVATE, + ) + .map_err(|error| format!("mmap failed: {}", error))?; + + // Dirty them both. + std::ptr::write_volatile(pointer.cast::(), b'A'); + std::ptr::write_volatile(pointer.cast::().add(page_size), b'B'); + + // Clear the first page. + let result_madvise = + rustix::mm::madvise(pointer, page_size, rustix::mm::Advice::LinuxDontNeed) + .map_err(|error| format!("madvise failed: {}", error)); + + // Fetch the values. + let value_1 = std::ptr::read_volatile(pointer.cast::()); + let value_2 = std::ptr::read_volatile(pointer.cast::().add(page_size)); + + let result_munmap = rustix::mm::munmap(pointer, 2 * page_size) + .map_err(|error| format!("munmap failed: {}", error)); + + result_madvise?; + result_munmap?; + + // Verify that the first page was cleared, while the second one was not. 
+ Ok(value_1 == 0 && value_2 == b'B') + } +} + +#[cfg(test)] +#[cfg(target_os = "linux")] +#[test] +fn test_is_madvise_working_check_does_not_fail() { + assert!(is_madvise_working().is_ok()); +} + +/// Checks whether a given instantiation strategy can be safely used, and replaces +/// it with a slower (but sound) alternative if it isn't. +#[cfg(target_os = "linux")] +pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { + let replacement_strategy = match *strategy { + // These strategies don't need working `madvise`. + InstantiationStrategy::Pooling | InstantiationStrategy::RecreateInstance => return, + + // These strategies require a working `madvise` to be sound. + InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, + InstantiationStrategy::RecreateInstanceCopyOnWrite | + InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + }; + + use once_cell::sync::OnceCell; + static IS_OK: OnceCell = OnceCell::new(); + + let is_ok = IS_OK.get_or_init(|| { + let is_ok = match is_madvise_working() { + Ok(is_ok) => is_ok, + Err(error) => { + // This should never happen. + log::warn!("Failed to check whether `madvise(MADV_DONTNEED)` works: {}", error); + false + } + }; + + if !is_ok { + log::warn!("You're running on a system with a broken `madvise(MADV_DONTNEED)` implementation. This will result in lower performance."); + } + + is_ok + }); + + if !is_ok { + *strategy = replacement_strategy; + } +} + +#[cfg(not(target_os = "linux"))] +pub(crate) fn replace_strategy_if_broken(_: &mut InstantiationStrategy) {} diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 77cd847d48169..5950b108ca4ab 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.7.6" -async-trait = "0.1.50" +async-trait = "0.1.57" dyn-clone = "1.0" finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.21" @@ -23,9 +23,9 @@ futures-timer = "3.0.1" hex = "0.4.2" log = "0.4.17" parity-scale-codec = { version = "3.0.0", features = ["derive"] } -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.8.4" -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } @@ -36,6 +36,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index f2d37da058f45..075179d3ceaf7 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" [dependencies] finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.16" -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", 
features = ["server", "macros"] } log = "0.4.8" parity-scale-codec = { version = "3.0.0", features = ["derive"] } serde = { version = "1.0.105", features = ["derive"] } diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 1cf23a18c794b..85df72de77b54 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -26,7 +26,8 @@ use std::sync::Arc; use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, - PendingSubscription, + types::SubscriptionResult, + SubscriptionSink, }; mod error; @@ -102,7 +103,7 @@ where ReportedRoundStates::from(&self.authority_set, &self.voter_state).map_err(Into::into) } - fn subscribe_justifications(&self, pending: PendingSubscription) { + fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { let stream = self.justification_stream.subscribe().map( |x: sc_finality_grandpa::GrandpaJustification| { JustificationNotification::from(x) @@ -110,12 +111,11 @@ where ); let fut = async move { - if let Some(mut sink) = pending.accept() { - sink.pipe_from_stream(stream).await; - } + sink.pipe_from_stream(stream).await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(()) } async fn prove_finality( @@ -283,9 +283,9 @@ mod tests { let (rpc, _) = setup_io_handler(EmptyVoterState); let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":0}"#.to_string(); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; - let (result, _) = rpc.raw_json_request(&request).await.unwrap(); + let (response, _) = rpc.raw_json_request(&request).await.unwrap(); - assert_eq!(expected_response, result,); + assert_eq!(expected_response, response.result); } #[tokio::test] @@ -306,8 +306,8 @@ mod tests { },\"id\":0}".to_string(); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; - let (result, _) = rpc.raw_json_request(&request).await.unwrap(); - assert_eq!(expected_response, result); + let (response, _) = rpc.raw_json_request(&request).await.unwrap(); + assert_eq!(expected_response, response.result); } #[tokio::test] @@ -328,7 +328,7 @@ mod tests { .unwrap(); let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(response, expected); + assert_eq!(response.result, expected); } fn create_justification() -> GrandpaJustification { diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 250e640cbf037..5f94a4d1b65be 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -89,7 +89,8 @@ use log::{debug, trace}; use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; -use sc_network::{ObservedRole, PeerId, ReputationChange}; +use sc_network::{PeerId, ReputationChange}; +use sc_network_common::protocol::event::ObservedRole; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -728,11 +729,7 @@ type MaybeMessage = Option<(Vec, NeighborPacket> impl Inner { fn new(config: crate::Config) -> Self { - let catch_up_config = if config.local_role.is_light() { - // if we are a light client we shouldn't be issuing any 
catch-up requests - // as we don't participate in the full GRANDPA protocol - CatchUpConfig::disabled() - } else if config.observer_enabled { + let catch_up_config = if config.observer_enabled { if config.local_role.is_authority() { // since the observer protocol is enabled, we will only issue // catch-up requests if we are an authority (and only to other @@ -1231,10 +1228,6 @@ impl Inner { None => return false, }; - if self.config.local_role.is_light() { - return false - } - if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { self.peers.first_stage_peers.contains(who) } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { @@ -1266,10 +1259,6 @@ impl Inner { None => return false, }; - if self.config.local_role.is_light() { - return false - } - if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { self.peers.first_stage_peers.contains(who) || self.peers.second_stage_peers.contains(who) || @@ -1740,7 +1729,7 @@ mod tests { assert!(res.unwrap().is_none()); // connect & disconnect. - peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); peers.peer_disconnected(&id); let res = peers.update_peer_state(&id, update.clone()); @@ -1764,7 +1753,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); let mut check_update = move |update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); @@ -1784,7 +1773,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); peers .update_peer_state( @@ -1975,7 +1964,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let mut inner = val.inner.write(); - inner.peers.new_peer(peer.clone(), ObservedRole::Authority); + inner.peers.new_peer(peer, ObservedRole::Authority); let res = inner.handle_catch_up_request( &peer, @@ -2016,7 +2005,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); let send_request = |set_id, round| { let mut inner = val.inner.write(); @@ -2071,7 +2060,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); let import_neighbor_message = |set_id, round| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2141,7 +2130,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. 
let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); // importing a neighbor message from a peer in the same set in a later // round should lead to a catch up request but since they're disabled @@ -2169,11 +2158,8 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer_authority.clone(), ObservedRole::Authority); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(peer_authority, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2222,7 +2208,7 @@ mod tests { // add the peer making the requests to the validator, otherwise it is // discarded. let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, @@ -2282,12 +2268,9 @@ mod tests { full_nodes.resize_with(30, || PeerId::random()); for i in 0..30 { - val.inner - .write() - .peers - .new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(authorities[i], ObservedRole::Authority); - val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(full_nodes[i], ObservedRole::Full); } let test = |rounds_elapsed, peers| { @@ -2366,7 +2349,7 @@ mod tests { // add a new light client as peer let light_peer = PeerId::random(); - val.inner.write().peers.new_peer(light_peer.clone(), ObservedRole::Light); + val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); assert!(!val.message_allowed()( &light_peer, @@ -2438,7 +2421,7 @@ mod tests { // add a new peer at set id 1 let peer1 = PeerId::random(); - val.inner.write().peers.new_peer(peer1.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer1, ObservedRole::Authority); val.inner .write() @@ -2451,7 +2434,7 @@ mod tests { // peer2 will default to set id 0 let peer2 = PeerId::random(); - val.inner.write().peers.new_peer(peer2.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer2, ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 0555a03db149f..7a47ebe214950 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -45,7 +45,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, Encode}; -use sc_network::{NetworkService, ReputationChange}; +use sc_network::ReputationChange; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::SyncCryptoStorePtr; @@ -58,6 +58,7 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; +use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, 
RoundNumber, SetId as SetIdNumber}; @@ -156,34 +157,26 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// /// Something that provides both the capabilities needed for the `gossip_network::Network` trait as /// well as the ability to set a fork sync request for a particular block. -pub trait Network: GossipNetwork + Clone + Send + 'static { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request( - &self, - peers: Vec, - hash: Block::Hash, - number: NumberFor, - ); +pub trait Network: + NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static +{ } -impl Network for Arc> +impl Network for T where - B: BlockT, - H: sc_network::ExHashT, + Block: BlockT, + T: NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static, { - fn set_sync_fork_request( - &self, - peers: Vec, - hash: B::Hash, - number: NumberFor, - ) { - NetworkService::set_sync_fork_request(self, peers, hash, number) - } } /// Create a unique topic for a round and set-id combo. @@ -320,7 +313,7 @@ impl> NetworkBridge { voters: Arc>, has_voted: HasVoted, ) -> (impl Stream> + Unpin, OutgoingMessages) { - self.note_round(round, set_id, &*voters); + self.note_round(round, set_id, &voters); let keystore = keystore.and_then(|ks| { let id = ks.local_id(); @@ -467,7 +460,7 @@ impl> NetworkBridge { hash: B::Hash, number: NumberFor, ) { - Network::set_sync_fork_request(&self.service, peers, hash, number) + self.service.set_sync_fork_request(peers, hash, number) } } @@ -637,9 +630,9 @@ fn incoming_global( .filter_map(move |(notification, msg)| { future::ready(match msg { GossipMessage::Commit(msg) => - process_commit(msg, notification, &gossip_engine, &gossip_validator, &*voters), + process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters), GossipMessage::CatchUp(msg) => - process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &*voters), + process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), _ => { debug!(target: "afg", "Skipping unknown message type"); None diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 0ec5092a2a047..4d709f56f3d61 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -25,7 +25,15 @@ use super::{ use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; use parity_scale_codec::Encode; -use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; +use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; +use sc_network_common::{ + config::MultiaddrWithPeerId, + protocol::event::{Event as NetworkEvent, ObservedRole}, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NetworkSyncForkRequest, NotificationSender, NotificationSenderError, + }, +}; use sc_network_gossip::Validator; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -34,6 +42,7 @@ use sp_keyring::Ed25519Keyring; use sp_runtime::traits::NumberFor; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::Arc, 
task::{Context, Poll}, @@ -42,8 +51,8 @@ use std::{ #[derive(Debug)] pub(crate) enum Event { EventStream(TracingUnboundedSender), - WriteNotification(sc_network::PeerId, Vec), - Report(sc_network::PeerId, sc_network::ReputationChange), + WriteNotification(PeerId, Vec), + Report(PeerId, ReputationChange), Announce(Hash), } @@ -52,57 +61,130 @@ pub(crate) struct TestNetwork { sender: TracingUnboundedSender, } -impl sc_network_gossip::Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) +impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); } - fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {} + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set(&self, _protocol: Cow<'static, str>, _peers: Vec) {} - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { - let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); + fn sync_num_connected(&self) -> usize { + unimplemented!(); } +} - fn announce(&self, block: Hash, _associated_data: Option>) { - let _ = self.sender.unbounded_send(Event::Announce(block)); +impl NetworkEventStream for TestNetwork { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test"); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + Box::pin(rx) } } -impl super::Network for TestNetwork { - fn set_sync_fork_request( +impl NetworkNotification for TestNetwork { + fn write_notification(&self, target: PeerId, _protocol: Cow<'static, str>, message: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); + } + + fn notification_sender( &self, - _peers: Vec, - _hash: Hash, - _number: NumberFor, - ) { + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } +} + +impl NetworkBlock> for TestNetwork { + fn announce_block(&self, hash: Hash, _data: Option>) { + let _ = 
self.sender.unbounded_send(Event::Announce(hash)); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); } } +impl NetworkSyncForkRequest> for TestNetwork { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + impl sc_network_gossip::ValidatorContext for TestNetwork { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { - >::write_notification( + fn send_message(&mut self, who: &PeerId, data: Vec) { + ::write_notification( self, - who.clone(), + *who, grandpa_protocol_name::NAME.into(), data, ); } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -199,7 +281,7 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } struct NoopContext; @@ -207,8 +289,8 @@ struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_message(&mut self, _: &PeerId, _: Vec) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } #[test] @@ -224,8 +306,7 @@ fn good_commit_leads_to_relay() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = - finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = finality_grandpa::Precommit { target_hash, target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -252,7 +333,7 @@ fn good_commit_leads_to_relay() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -281,19 +362,19 @@ fn good_commit_leads_to_relay() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. 
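The mock above captures the shape of the refactor: instead of one monolithic network trait, the gossip and GRANDPA layers now ask for narrow capability traits that a handle implements individually. Spelled out with its generic parameters, the composed trait in `communication/mod.rs` and its blanket impl read roughly as follows (a reconstruction from this diff, with bounds inferred from the surrounding code):

```rust
use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest};
use sc_network_gossip::Network as GossipNetwork;
use sp_runtime::traits::{Block as BlockT, NumberFor};

/// Everything the GRANDPA `NetworkBridge` needs from the network,
/// expressed as a sum of the new capability traits.
pub trait Network<Block: BlockT>:
    NetworkSyncForkRequest<Block::Hash, NumberFor<Block>>
    + NetworkBlock<Block::Hash, NumberFor<Block>>
    + GossipNetwork<Block>
    + Clone
    + Send
    + 'static
{
}

/// Blanket impl: anything satisfying the bounds is a `Network`, so a
/// test double like `TestNetwork` plugs in without newtype wrappers.
impl<Block, T> Network<Block> for T
where
    Block: BlockT,
    T: NetworkSyncForkRequest<Block::Hash, NumberFor<Block>>
        + NetworkBlock<Block::Hash, NumberFor<Block>>
        + GossipNetwork<Block>
        + Clone
        + Send
        + 'static,
{
}
```

The empty trait body plus blanket impl is the usual trait-alias idiom: the trait adds no behaviour of its own, it only names a bundle of bounds.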
- let sender_id = id.clone(); + let sender_id = id; let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), + remote: sender_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), + remote: sender_id, messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), @@ -301,9 +382,9 @@ fn good_commit_leads_to_relay() { }); // Add a random peer which will be the recipient of this message - let receiver_id = sc_network::PeerId::random(); + let receiver_id = PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id.clone(), + remote: receiver_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, @@ -375,8 +456,7 @@ fn bad_commit_leads_to_report() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = - finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = finality_grandpa::Precommit { target_hash, target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -403,7 +483,7 @@ fn bad_commit_leads_to_report() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -432,17 +512,17 @@ fn bad_commit_leads_to_report() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. 
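`bad_commit_leads_to_report` exercises the punitive half of this machinery: an invalid commit must surface as a `NetworkPeers::report_peer` call, which `TestNetwork` records as `Event::Report`. A minimal sketch of how such a reputation change is defined and applied (the constant name and value are illustrative, in the style of the `rep` modules elsewhere in this diff):

```rust
use libp2p::PeerId;
use sc_network_common::service::NetworkPeers;
use sc_peerset::ReputationChange as Rep;

// Illustrative penalty: positive deltas reward a peer,
// negative deltas penalise it.
const BAD_COMMIT: Rep = Rep::new(-(1 << 6), "Invalid GRANDPA commit");

fn punish(network: &impl NetworkPeers, who: PeerId) {
    // The peerset aggregates these scores and can eventually
    // disconnect or ban a peer whose score drops too low.
    network.report_peer(who, BAD_COMMIT);
}
```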
- let sender_id = id.clone(); + let sender_id = id; let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), + remote: sender_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), + remote: sender_id, messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), @@ -484,7 +564,7 @@ fn bad_commit_leads_to_report() { #[test] fn peer_with_higher_view_leads_to_catch_up_request() { - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let (tester, mut net) = make_test_network(); let test = tester diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index eefb3d3f0aee4..b5a0d7be70f19 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -284,7 +284,7 @@ where impl<'a, H, N> InnerGuard<'a, H, N> { fn as_mut(&mut self) -> &mut AuthoritySet { - &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") + self.guard.as_mut().expect("only taken on deconstruction; qed") } fn set_old(&mut self, old: AuthoritySet) { diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 44abb4b95beba..6c3b6aa826ded 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -74,7 +74,7 @@ impl GrandpaJustification { .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| (precommit.target_hash.clone(), precommit.target_number)) + .map(|precommit| (precommit.target_hash, precommit.target_number)) { None => return error(), Some(base) => base, @@ -176,7 +176,7 @@ impl GrandpaJustification { .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| precommit.target_hash.clone()) + .map(|precommit| precommit.target_hash) .expect( "can only fail if precommits is empty; \ commit has been validated above; \ diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 85d80dcfe7cde..9bcb03c0555c2 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -289,7 +289,7 @@ where network.note_round( crate::communication::Round(round), crate::communication::SetId(set_id), - &*voters, + &voters, ) } }; diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 2d12232b04f15..3dd21d51b6a2d 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -28,7 +28,7 @@ use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, ImportedAux, }; -use sc_network::config::{ProtocolConfig, Role}; +use sc_network::config::Role; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestClient, TestNetFactory, @@ -73,6 +73,7 @@ type GrandpaBlockImport = crate::GrandpaBlockImport< LongestChain, >; +#[derive(Default)] struct GrandpaTestNet { peers: Vec, test_config: TestApi, @@ -110,16 +111,6 @@ impl TestNetFactory for GrandpaTestNet { type PeerData = PeerData; type BlockImport = GrandpaBlockImport; - /// Create new test network with peers and 
given config. - fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { peers: Vec::new(), test_config: Default::default() } - } - - fn default_config() -> ProtocolConfig { - // This is unused. - ProtocolConfig::default() - } - fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { notifications_protocols: vec![grandpa_protocol_name::NAME.into()], @@ -128,12 +119,7 @@ impl TestNetFactory for GrandpaTestNet { }) } - fn make_verifier( - &self, - _client: PeersClient, - _cfg: &ProtocolConfig, - _: &PeerData, - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { PassThroughVerifier::new(false) // use non-instant finality. } @@ -184,7 +170,7 @@ pub(crate) struct RuntimeApi { impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { RuntimeApi { inner: self.clone() }.into() } } @@ -224,7 +210,7 @@ impl GenesisAuthoritySetProvider for TestApi { const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { @@ -547,7 +533,7 @@ fn transition_3_voters_twice_1_full_observer() { { let net = net.clone(); let client = net.lock().peers[0].client().clone(); - let peers_c = peers_c.clone(); + let peers_c = *peers_c; // wait for blocks to be finalized before generating new ones let block_production = client diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 6adce0d920209..fe7caf74422db 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -587,7 +587,7 @@ mod tests { fn import_header(&self, header: Header) { let hash = header.hash(); - let number = header.number().clone(); + let number = *header.number(); self.known_blocks.lock().insert(hash, number); self.sender @@ -608,7 +608,7 @@ mod tests { impl BlockStatusT for TestBlockStatus { fn block_number(&self, hash: Hash) -> Result, Error> { - Ok(self.inner.lock().get(&hash).map(|x| x.clone())) + Ok(self.inner.lock().get(&hash).map(|x| *x)) } } diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 209b0f1b33ced..fb7754fc0169a 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -89,7 +89,7 @@ where /// can prioritize shorter chains over longer ones, the vote may be /// closer to the best block than N. 
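Immediately below, `BeforeBestBlockBy`'s field goes from private (`N`) to public (`pub N`), letting downstream crates construct the rule themselves. A sketch, assuming the crate re-exports the type:

```rust
use sc_finality_grandpa::BeforeBestBlockBy;
use sp_runtime::traits::{Block as BlockT, NumberFor};

// A rule that keeps votes at least five blocks behind the best block.
// It would typically be handed to `VotingRulesBuilder::add` when
// assembling the voter.
fn keep_distance<Block: BlockT>() -> BeforeBestBlockBy<NumberFor<Block>> {
    BeforeBestBlockBy(5u32.into())
}
```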
#[derive(Clone)] -pub struct BeforeBestBlockBy(N); +pub struct BeforeBestBlockBy(pub N); impl VotingRule for BeforeBestBlockBy> where Block: BlockT, @@ -419,7 +419,7 @@ mod tests { } let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); - let best_number = best.number().clone(); + let best_number = *best.number(); for i in 0u32..5 { let base = client.header(&BlockId::Number(i.into())).unwrap().unwrap(); diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 90f6828a1105d..a31a0a8b91908 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -23,7 +23,7 @@ use crate::{ BlockNumberOps, GrandpaJustification, SharedAuthoritySet, }; use sc_client_api::Backend as ClientBackend; -use sc_network::warp_request_handler::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sc_network_common::sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_finality_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 528365d62c18b..b3ac5d892fd58 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -19,7 +19,7 @@ futures-timer = "3.0.1" log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 446ddf47b4cab..0441011b0ec42 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -20,7 +20,13 @@ use crate::OutputFormat; use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; -use sc_network::{NetworkStatus, SyncState, WarpSyncPhase, WarpSyncProgress}; +use sc_network_common::{ + service::NetworkStatus, + sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + SyncState, + }, +}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{fmt, time::Instant}; diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 5dca77f1a7433..52f1c95fe0198 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network::NetworkService; +use sc_network_common::service::NetworkStatusProvider; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; @@ -53,12 +53,13 @@ impl Default for OutputFormat { } /// Builds the informant and returns a `Future` that drives the informant. -pub async fn build( +pub async fn build( client: Arc, - network: Arc::Hash>>, + network: N, pool: Arc
<P>
, format: OutputFormat, ) where + N: NetworkStatusProvider, C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, P: TransactionPool + MallocSizeOf, diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index be4adba2a52fe..9b7afba759c60 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -14,10 +14,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" hex = "0.4.0" -parking_lot = "0.12.0" -serde_json = "1.0.79" +parking_lot = "0.12.1" +serde_json = "1.0.85" thiserror = "1.0" sp-application-crypto = { version = "6.0.0", path = "../../primitives/application-crypto" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 0fac96b0fdde0..8ecf2b9cec787 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,12 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = "0.7.6" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" lru = "0.7.5" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 2d086e89b4a10..45eff09332f67 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,7 +21,8 @@ use crate::{ Network, Validator, }; -use sc_network::{Event, ReputationChange}; +use sc_network_common::protocol::event::Event; +use sc_peerset::ReputationChange; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -85,7 +86,7 @@ impl GossipEngine { B: 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream(); + let network_event_stream = network.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), @@ -151,7 +152,7 @@ impl GossipEngine { /// Send addressed message to the given peers. The message is not kept or multicast /// later on. - pub fn send_message(&mut self, who: Vec, data: Vec) { + pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { self.state_machine.send_message(&mut *self.network, who, data.clone()); } @@ -162,7 +163,7 @@ impl GossipEngine { /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. pub fn announce(&self, block: B::Hash, associated_data: Option>) { - self.network.announce(block, associated_data); + self.network.announce_block(block, associated_data); } } @@ -181,7 +182,10 @@ impl Future for GossipEngine { this.network.add_set_reserved(remote, this.protocol.clone()); }, Event::SyncDisconnected { remote } => { - this.network.remove_set_reserved(remote, this.protocol.clone()); + this.network.remove_peers_from_reserved_set( + this.protocol.clone(), + vec![remote], + ); }, Event::NotificationStreamOpened { remote, protocol, role, .. 
} => { if protocol != this.protocol { @@ -304,7 +308,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{ValidationResult, ValidatorContext}; + use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; use async_std::task::spawn; use futures::{ channel::mpsc::{unbounded, UnboundedSender}, @@ -312,10 +316,21 @@ mod tests { future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use sc_network::ObservedRole; - use sp_runtime::{testing::H256, traits::Block as BlockT}; + use sc_network_common::{ + config::MultiaddrWithPeerId, + protocol::event::ObservedRole, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sp_runtime::{ + testing::H256, + traits::{Block as BlockT, NumberFor}, + }; use std::{ borrow::Cow, + collections::HashSet, sync::{Arc, Mutex}, }; use substrate_test_runtime_client::runtime::Block; @@ -330,29 +345,119 @@ mod tests { event_senders: Vec>, } - impl Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } + + impl NetworkEventStream for TestNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { let (tx, rx) = unbounded(); self.inner.lock().unwrap().event_senders.push(tx); Box::pin(rx) } + } - fn report_peer(&self, _: PeerId, _: ReputationChange) {} - - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + impl NetworkNotification for TestNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn notification_sender( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkBlock<::Hash, NumberFor> for TestNetwork { + fn 
announce_block(&self, _hash: ::Hash, _data: Option>) { unimplemented!(); } - fn announce(&self, _: B::Hash, _: Option>) { + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } @@ -416,7 +521,7 @@ mod tests { // Register the remote peer. event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer.clone(), + remote: remote_peer, protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -428,7 +533,7 @@ mod tests { .iter() .cloned() .map(|m| Event::NotificationsReceived { - remote: remote_peer.clone(), + remote: remote_peer, messages: vec![(protocol.clone(), m.into())], }) .collect::>(); @@ -458,10 +563,7 @@ mod tests { for subscriber in subscribers.iter_mut() { assert_eq!( subscriber.next(), - Some(TopicNotification { - message: message.clone(), - sender: Some(remote_peer.clone()), - }), + Some(TopicNotification { message: message.clone(), sender: Some(remote_peer) }), ); } } @@ -557,7 +659,7 @@ mod tests { // Create channels. let (txs, mut rxs) = channels .iter() - .map(|ChannelLengthAndTopic { length, topic }| (topic.clone(), channel(*length))) + .map(|ChannelLengthAndTopic { length, topic }| (*topic, channel(*length))) .fold((vec![], vec![]), |mut acc, (topic, (tx, rx))| { acc.0.push((topic, tx)); acc.1.push((topic, rx)); @@ -579,7 +681,7 @@ mod tests { // Register the remote peer. event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer.clone(), + remote: remote_peer, protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -606,10 +708,7 @@ mod tests { .collect(); event_sender - .start_send(Event::NotificationsReceived { - remote: remote_peer.clone(), - messages, - }) + .start_send(Event::NotificationsReceived { remote: remote_peer, messages }) .expect("Event stream is unbounded; qed."); } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 4b83708702466..0fdde1feac6c6 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -41,9 +41,9 @@ //! messages. //! //! The [`GossipEngine`] will automatically use [`Network::add_set_reserved`] and -//! [`Network::remove_set_reserved`] to maintain a set of peers equal to the set of peers the -//! node is syncing from. See the documentation of `sc-network` for more explanations about the -//! concepts of peer sets. +//! [`NetworkPeers::remove_peers_from_reserved_set`] to maintain a set of peers equal to the set of +//! peers the node is syncing from. See the documentation of `sc-network` for more explanations +//! about the concepts of peer sets. //! //! # What is a validator? //! @@ -67,74 +67,35 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use futures::prelude::*; -use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::traits::Block as BlockT; -use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; +use libp2p::{multiaddr, PeerId}; +use sc_network_common::service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, iter}; mod bridge; mod state_machine; mod validator; /// Abstraction over a network. -pub trait Network { - /// Returns a stream of events representing what happens on the network. - fn event_stream(&self) -> Pin + Send>>; - - /// Adjust the reputation of a node. 
- fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); - - /// Adds the peer to the set of peers to be connected to with this protocol. - fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Removes the peer from the set of peers to be connected to with this protocol. - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); - - /// Notify everyone we're connected to that we have the given block. - /// - /// Note: this method isn't strictly related to gossiping and should eventually be moved - /// somewhere else. - fn announce(&self, block: B::Hash, associated_data: Option>); -} - -impl Network for Arc> { - fn event_stream(&self) -> Pin + Send>> { - Box::pin(NetworkService::event_stream(self, "network-gossip")) - } - - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { - NetworkService::report_peer(self, peer_id, reputation); - } - +pub trait Network: + NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> +{ fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); - let result = - NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } +} - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(who).collect()); - } - - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::disconnect_peer(self, who, protocol) - } - - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { - NetworkService::write_notification(self, who, protocol, message) - } - - fn announce(&self, block: B::Hash, associated_data: Option>) { - NetworkService::announce_block(self, block, associated_data) - } +impl Network for T where + T: NetworkPeers + + NetworkEventStream + + NetworkNotification + + NetworkBlock> +{ } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 8a016cbaab3da..ff75b4520bf03 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,7 +22,7 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network::ObservedRole; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use std::{borrow::Cow, collections::HashMap, iter, sync::Arc, time, time::Instant}; @@ -42,9 +42,9 @@ const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); mod rep { - use sc_network::ReputationChange as Rep; + use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sends us a gossip message that we didn't know about. 
- pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successfull gossip"); + pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successful gossip"); /// Reputation change when a peer sends us a gossip message that we already knew about. pub const DUPLICATE_GOSSIP: Rep = Rep::new(-(1 << 2), "Duplicate gossip"); } @@ -511,11 +511,24 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; + use crate::multiaddr::Multiaddr; use futures::prelude::*; - use sc_network::{Event, ReputationChange}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use sc_network_common::{ + config::MultiaddrWithPeerId, + protocol::event::Event, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sc_peerset::ReputationChange; + use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + traits::NumberFor, + }; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::{Arc, Mutex}, }; @@ -569,28 +582,118 @@ mod tests { peer_reports: Vec<(PeerId, ReputationChange)>, } - impl Network for NoOpNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for NoOpNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn report_peer(&self, peer_id: PeerId, reputation_change: ReputationChange) { - self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkEventStream for NoOpNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { unimplemented!(); } + } - fn announce(&self, _: B::Hash, _: Option>) { + impl NetworkNotification for NoOpNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { + unimplemented!(); + } + + fn notification_sender( + &self, + _target: PeerId, + 
_protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } + + impl NetworkBlock<::Hash, NumberFor> for NoOpNetwork { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { + unimplemented!(); + } + + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } @@ -708,7 +811,7 @@ mod tests { .on_incoming( &mut network, // Unregistered peer. - remote.clone(), + remote, vec![vec![1, 2, 3]], ); diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 7d60f7b31397f..185c2cfeed2c7 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -16,7 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{ObservedRole, PeerId}; +use libp2p::PeerId; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 1b525844395b5..82f77fa44450f 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -29,17 +29,17 @@ futures = "0.3.21" futures-timer = "3.0.2" hex = "0.4.0" ip_network = "0.4.1" -libp2p = "0.45.1" +libp2p = "0.46.1" linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" pin-project = "1.0.10" prost = "0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" smallvec = "1.8.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } @@ -51,15 +51,12 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "./common" } -sc-network-light = { version = "0.10.0-dev", path = "./light" } -sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-arithmetic = { version = "5.0.0", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] @@ -67,6 +64,8 @@ assert_matches = "1.3" async-std = "1.11.0" rand = "0.7.2" tempfile = "3.1.0" +sc-network-light = { version = "0.10.0-dev", path = "./light" } +sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 553eb57958a5d..ea50c8b794ccb 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,10 +17,19 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +async-trait = "0.1.57" 
+bitflags = "1.3.2" +bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.45.1" +libp2p = "0.46.1" smallvec = "1.8.0" +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +serde = { version = "1.0.136", features = ["derive"] } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +thiserror = "1.0" diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 92f8df5cd380f..c2e61424c88e0 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,7 +18,8 @@ //! Configuration of the networking layer. -use std::{fmt, str}; +use libp2p::{multiaddr, Multiaddr, PeerId}; +use std::{fmt, str, str::FromStr}; /// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. #[derive(Clone, PartialEq, Eq, Hash)] @@ -42,3 +43,129 @@ impl fmt::Debug for ProtocolId { fmt::Debug::fmt(self.as_ref(), f) } } + +/// Parses a string address and splits it into Multiaddress and PeerId, if +/// valid. +/// +/// # Example +/// +/// ``` +/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_common::config::parse_str_addr; +/// let (peer_id, addr) = parse_str_addr( +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" +/// ).unwrap(); +/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); +/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); +/// ``` +pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { + let addr: Multiaddr = addr_str.parse()?; + parse_addr(addr) +} + +/// Splits a Multiaddress into a Multiaddress and PeerId. +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { + let who = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + _ => return Err(ParseErr::PeerIdMissing), + }; + + Ok((who, addr)) +} + +/// Address of a node, including its identity. +/// +/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. +/// +/// # Example +/// +/// ``` +/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_common::config::MultiaddrWithPeerId; +/// let addr: MultiaddrWithPeerId = +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); +/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); +/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] +#[serde(try_from = "String", into = "String")] +pub struct MultiaddrWithPeerId { + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, +} + +impl MultiaddrWithPeerId { + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
+ pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); + self.multiaddr.clone().with(proto) + } +} + +impl fmt::Display for MultiaddrWithPeerId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } +} + +impl FromStr for MultiaddrWithPeerId { + type Err = ParseErr; + + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(Self { peer_id, multiaddr }) + } +} + +impl From for String { + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } +} + +impl TryFrom for MultiaddrWithPeerId { + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } +} + +/// Error that can be generated by `parse_str_addr`. +#[derive(Debug)] +pub enum ParseErr { + /// Error while parsing the multiaddress. + MultiaddrParse(multiaddr::Error), + /// Multihash of the peer ID is invalid. + InvalidPeerId, + /// The peer ID is missing from the address. + PeerIdMissing, +} + +impl fmt::Display for ParseErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::MultiaddrParse(err) => write!(f, "{}", err), + Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), + } + } +} + +impl std::error::Error for ParseErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::MultiaddrParse(err) => Some(err), + Self::InvalidPeerId => None, + Self::PeerIdMissing => None, + } + } +} + +impl From for ParseErr { + fn from(err: multiaddr::Error) -> ParseErr { + Self::MultiaddrParse(err) + } +} diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 81769e23debbb..3a30d24900199 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -20,4 +20,7 @@ pub mod config; pub mod message; +pub mod protocol; pub mod request_responses; +pub mod service; +pub mod sync; diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs new file mode 100644 index 0000000000000..0fd36cb511112 --- /dev/null +++ b/client/network/common/src/protocol.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod event; diff --git a/client/network/src/protocol/event.rs b/client/network/common/src/protocol/event.rs similarity index 96% rename from client/network/src/protocol/event.rs rename to client/network/common/src/protocol/event.rs index 26c9544960605..c6fb4a2faf9f9 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -67,14 +67,14 @@ pub enum Event { remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. 
/// This is always equal to the value of - /// [`crate::config::NonDefaultSetConfig::notifications_protocol`] of one of the + /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the /// configured sets. protocol: Cow<'static, str>, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// Always contains a value equal to the value in - /// [`crate::config::NonDefaultSetConfig::fallback_names`]. + /// `sc_network::config::NonDefaultSetConfig::fallback_names`. negotiated_fallback: Option>, /// Role of the remote. role: ObservedRole, diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index 71570e6beb864..a216c9c2d254d 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -19,7 +19,7 @@ //! Collection of generic data structures for request-response protocols. use futures::channel::{mpsc, oneshot}; -use libp2p::PeerId; +use libp2p::{request_response::OutboundFailure, PeerId}; use sc_peerset::ReputationChange; use std::{borrow::Cow, time::Duration}; @@ -29,6 +29,9 @@ pub struct ProtocolConfig { /// Name of the protocol on the wire. Should be something like `/foo/bar`. pub name: Cow<'static, str>, + /// Fallback on the wire protocol names to support. + pub fallback_names: Vec>, + /// Maximum allowed size, in bytes, of a request. /// /// Any request larger than this value will be declined as a way to avoid allocating too @@ -112,3 +115,40 @@ pub struct OutgoingResponse { /// > written to the buffer managed by the operating system. pub sent_feedback: Option>, } + +/// When sending a request, what to do on a disconnected recipient. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum IfDisconnected { + /// Try to connect to the peer. + TryConnect, + /// Just fail if the destination is not yet connected. + ImmediateError, +} + +/// Convenience functions for `IfDisconnected`. +impl IfDisconnected { + /// Shall we connect to a disconnected peer? + pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } +} + +/// Error in a request. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RequestFailure { + #[error("We are not currently connected to the requested peer.")] + NotConnected, + #[error("Given protocol hasn't been registered.")] + UnknownProtocol, + #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] + Refused, + #[error("The remote replied, but the local node is no longer interested in the response.")] + Obsolete, + /// Problem on the network. + #[error("Problem on the network: {0}")] + Network(OutboundFailure), +} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs new file mode 100644 index 0000000000000..9544cddf013cf --- /dev/null +++ b/client/network/common/src/service.rs @@ -0,0 +1,660 @@ +// This file is part of Substrate. +// +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +// +// If you read this, you are very thorough, congratulations. + +use crate::{ + config::MultiaddrWithPeerId, + protocol::event::Event, + request_responses::{IfDisconnected, RequestFailure}, + sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, +}; +use futures::{channel::oneshot, Stream}; +pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; +use libp2p::{Multiaddr, PeerId}; +use sc_peerset::ReputationChange; +pub use signature::Signature; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, collections::HashSet, future::Future, pin::Pin, sync::Arc}; + +mod signature; + +/// Signer with network identity +pub trait NetworkSigner { + /// Signs the message with the `KeyPair` that defines the local [`PeerId`]. + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result; +} + +impl NetworkSigner for Arc +where + T: ?Sized, + T: NetworkSigner, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + T::sign_with_local_identity(self, msg) + } +} + +/// Provides access to the networking DHT. +pub trait NetworkDHTProvider { + /// Start getting a value from the DHT. + fn get_value(&self, key: &KademliaKey); + + /// Start putting a value in the DHT. + fn put_value(&self, key: KademliaKey, value: Vec); +} + +impl NetworkDHTProvider for Arc +where + T: ?Sized, + T: NetworkDHTProvider, +{ + fn get_value(&self, key: &KademliaKey) { + T::get_value(self, key) + } + + fn put_value(&self, key: KademliaKey, value: Vec) { + T::put_value(self, key, value) + } +} + +/// Provides an ability to set a fork sync request for a particular block. +pub trait NetworkSyncForkRequest { + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber); +} + +impl NetworkSyncForkRequest for Arc +where + T: ?Sized, + T: NetworkSyncForkRequest, +{ + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber) { + T::set_sync_fork_request(self, peers, hash, number) + } +} + +/// Overview status of the network. +#[derive(Clone)] +pub struct NetworkStatus { + /// Current global sync state. + pub sync_state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// The total number of bytes received. 
+ pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, + /// State sync in progress. + pub state_sync: Option, + /// Warp sync in progress. + pub warp_sync: Option>, +} + +/// Provides high-level status information about network. +#[async_trait::async_trait] +pub trait NetworkStatusProvider { + /// High-level network status information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. + async fn status(&self) -> Result, ()>; +} + +// Manual implementation to avoid extra boxing here +impl NetworkStatusProvider for Arc +where + T: ?Sized, + T: NetworkStatusProvider, +{ + fn status<'life0, 'async_trait>( + &'life0 self, + ) -> Pin, ()>> + Send + 'async_trait>> + where + 'life0: 'async_trait, + Self: 'async_trait, + { + T::status(self) + } +} + +/// Provides low-level API for manipulating network peers. +pub trait NetworkPeers { + /// Set authorized peers. + /// + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + fn set_authorized_peers(&self, peers: HashSet); + + /// Set authorized_only flag. + /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. + fn set_authorized_only(&self, reserved_only: bool); + + /// Adds an address known to a node. + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); + + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. + /// + /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. + fn accept_unreserved_peers(&self); + + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. + fn deny_unreserved_peers(&self); + + /// Adds a `PeerId` and its `Multiaddr` as reserved. + /// + /// Returns an `Err` if the given string is not a valid multiaddress + /// or contains an invalid peer ID (which includes the local peer ID). + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; + + /// Removes a `PeerId` from the list of reserved peers. + fn remove_reserved_peer(&self, peer_id: PeerId); + + /// Sets the reserved set of a protocol to the given set of peers. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// The node will start establishing/accepting connections and substreams to/from peers in this + /// set, if it doesn't have any substream open with them yet. + /// + /// Note however, if a call to this function results in less peers on the reserved set, they + /// will not necessarily get disconnected (depending on available free slots in the peer set). + /// If you want to also disconnect those removed peers, you will have to call + /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit + /// this step if the peer set is in reserved only mode. 
+ /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Add peers to a peer set. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Returns the number of peers in the sync peer set we're connected to. + fn sync_num_connected(&self) -> usize; +} + +// Manual implementation to avoid extra boxing here +impl NetworkPeers for Arc +where + T: ?Sized, + T: NetworkPeers, +{ + fn set_authorized_peers(&self, peers: HashSet) { + T::set_authorized_peers(self, peers) + } + + fn set_authorized_only(&self, reserved_only: bool) { + T::set_authorized_only(self, reserved_only) + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + T::add_known_address(self, peer_id, addr) + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + T::report_peer(self, who, cost_benefit) + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + T::disconnect_peer(self, who, protocol) + } + + fn accept_unreserved_peers(&self) { + T::accept_unreserved_peers(self) + } + + fn deny_unreserved_peers(&self) { + T::deny_unreserved_peers(self) + } + + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { + T::add_reserved_peer(self, peer) + } + + fn remove_reserved_peer(&self, peer_id: PeerId) { + T::remove_reserved_peer(self, peer_id) + } + + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::set_reserved_peers(self, protocol, peers) + } + + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_peers_to_reserved_set(self, protocol, peers) + } + + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_peers_from_reserved_set(self, protocol, peers) + } + + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_to_peers_set(self, protocol, peers) + } + + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_from_peers_set(self, protocol, peers) + } + + fn sync_num_connected(&self) -> usize { + T::sync_num_connected(self) + 
+
+/// Provides access to the network-level event stream.
+pub trait NetworkEventStream {
+	/// Returns a stream containing the events that happen on the network.
+	///
+	/// If this method is called multiple times, the events are duplicated.
+	///
+	/// The stream never ends (unless the `NetworkWorker` gets shut down).
+	///
+	/// The name passed is used to identify the channel in the Prometheus metrics. Note that the
+	/// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having
+	/// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory.
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>>;
+}
+
+impl<T> NetworkEventStream for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkEventStream,
+{
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
+		T::event_stream(self, name)
+	}
+}
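A short sketch of consuming that stream; the subscriber name is arbitrary, and `Event::NotificationsReceived` is assumed to be one of the variants of the `Event` enum re-exported elsewhere in this diff:

```rust
use futures::StreamExt;

// Illustrative consumer: log incoming notifications until the network
// worker shuts down. The string names this subscriber's Prometheus channel.
async fn watch_events<N: NetworkEventStream>(net: &N) {
	let mut events = net.event_stream("example-subscriber");
	while let Some(event) = events.next().await {
		if let Event::NotificationsReceived { remote, messages } = event {
			log::trace!("{} sent {} notification(s)", remote, messages.len());
		}
	}
}
```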
+
+/// Trait for providing information about the local network state.
+pub trait NetworkStateInfo {
+	/// Returns the local external addresses.
+	fn external_addresses(&self) -> Vec<Multiaddr>;
+
+	/// Returns the local Peer ID.
+	fn local_peer_id(&self) -> PeerId;
+}
+
+impl<T> NetworkStateInfo for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkStateInfo,
+{
+	fn external_addresses(&self) -> Vec<Multiaddr> {
+		T::external_addresses(self)
+	}
+
+	fn local_peer_id(&self) -> PeerId {
+		T::local_peer_id(self)
+	}
+}
+
+/// Reserved slot in the notifications buffer, ready to accept data.
+pub trait NotificationSenderReady {
+	/// Consumes this slot's reservation and actually queues the notification.
+	///
+	/// NOTE: a trait method can't consume `self`, so calling this method a second time will
+	/// return an error instead.
+	fn send(&mut self, notification: Vec<u8>) -> Result<(), NotificationSenderError>;
+}
+
+/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol.
+#[async_trait::async_trait]
+pub trait NotificationSender: Send + Sync + 'static {
+	/// Returns a future that resolves when the `NotificationSender` is ready to send a
+	/// notification.
+	async fn ready(&self)
+		-> Result<Box<dyn NotificationSenderReady + '_>, NotificationSenderError>;
+}
+
+/// Error returned by [`NetworkNotification::notification_sender`].
+#[derive(Debug, thiserror::Error)]
+pub enum NotificationSenderError {
+	/// The notification receiver has been closed, usually because the underlying connection
+	/// closed.
+	///
+	/// Some of the notifications most recently sent may not have been received. However,
+	/// the peer may still be connected and a new `NotificationSender` for the same
+	/// protocol can be obtained from [`NetworkNotification::notification_sender`].
+	#[error("The notification receiver has been closed")]
+	Closed,
+	/// Protocol name hasn't been registered.
+	#[error("Protocol name hasn't been registered")]
+	BadProtocol,
+}
+
+/// Provides the ability to send network notifications.
+pub trait NetworkNotification {
+	/// Appends a notification to the buffer of pending outgoing notifications with the given peer.
+	/// Has no effect if the notifications channel with this protocol name is not open.
+	///
+	/// If the buffer of pending outgoing notifications with that peer is full, the notification
+	/// is silently dropped and the connection to the remote will start being shut down. This
+	/// happens if you call this method at a higher rate than the rate at which the peer processes
+	/// these notifications, or if the available network bandwidth is too low.
+	///
+	/// For this reason, this method is considered soft-deprecated. You are encouraged to use
+	/// [`NetworkNotification::notification_sender`] instead.
+	///
+	/// > **Note**: The reason why this is a no-op in the situation where we have no channel is
+	/// > that we don't guarantee message delivery anyway. Networking issues can cause
+	/// > connections to drop at any time, and higher-level logic shouldn't differentiate
+	/// > between the remote voluntarily closing a substream or a network error
+	/// > preventing the message from being delivered.
+	///
+	/// The protocol must have been registered with
+	/// `crate::config::NetworkConfiguration::notifications_protocols`.
+	fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec<u8>);
+
+	/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
+	///
+	/// A `NotificationSender` is scoped to a particular connection to the peer that holds
+	/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two
+	/// steps:
+	///
+	/// 1.  [`NotificationSender::ready`] is used to wait for the sender to become ready
+	/// for another notification, yielding a [`NotificationSenderReady`] token.
+	/// 2.  [`NotificationSenderReady::send`] enqueues the notification for sending. This operation
+	/// can only fail if the underlying notification substream or connection has suddenly closed.
+	///
+	/// An error is returned by [`NotificationSenderReady::send`] if there exists no open
+	/// notifications substream with that combination of peer and protocol, or if the remote
+	/// has asked to close the notifications substream. If that happens, it is guaranteed that an
+	/// [`Event::NotificationStreamClosed`] has been generated on the stream returned by
+	/// [`NetworkEventStream::event_stream`].
+	///
+	/// If the remote requests to close the notifications substream, all notifications successfully
+	/// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the
+	/// substream actually gets closed, but attempting to enqueue more notifications will now
+	/// return an error. It is however possible for the entire connection to be abruptly closed,
+	/// in which case enqueued notifications will be lost.
+	///
+	/// The protocol must have been registered with
+	/// `crate::config::NetworkConfiguration::notifications_protocols`.
+	///
+	/// # Usage
+	///
+	/// This method returns a struct that allows waiting until there is space available in the
+	/// buffer of messages towards the given peer. If the peer processes notifications at a slower
+	/// rate than we send them, this buffer will quickly fill up.
+	///
+	/// As such, you should never do something like this:
+	///
+	/// ```ignore
+	/// // Do NOT do this
+	/// for peer in peers {
+	/// 	if let Ok(n) = network.notification_sender(peer, ...) {
+	/// 		if let Ok(s) = n.ready().await {
+	/// 			let _ = s.send(...);
+	/// 		}
+	/// 	}
+	/// }
+	/// ```
+	///
+	/// Doing so would slow down all peers to the rate of the slowest one. A malicious or
+	/// malfunctioning peer could intentionally process notifications at a very slow rate.
+	///
+	/// Instead, you are encouraged to maintain your own buffer of notifications on top of the one
+	/// maintained by `sc-network`, and use `notification_sender` to progressively send out
+	/// elements from your buffer. If this additional buffer is full (which will happen at some
+	/// point if the peer is too slow to process notifications), appropriate measures can be taken,
+	/// such as removing non-critical notifications from the buffer or disconnecting the peer
+	/// using [`NetworkPeers::disconnect_peer`].
+	///
+	///
+	/// Notifications              Per-peer buffer
+	///   broadcast    +------->   of notifications   +-->  `notification_sender`  +-->  Internet
+	///                    ^       (not covered by
+	///                    |         sc-network)
+	///                    +
+	///      Notifications should be dropped
+	///             if buffer is full
+	///
+	///
+	/// See also the `sc-network-gossip` crate for a higher-level way to send notifications.
+	fn notification_sender(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+	) -> Result<Box<dyn NotificationSender>, NotificationSenderError>;
+}
+
+impl<T> NetworkNotification for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkNotification,
+{
+	fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec<u8>) {
+		T::write_notification(self, target, protocol, message)
+	}
+
+	fn notification_sender(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
+		T::notification_sender(self, target, protocol)
+	}
+}
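A sketch of the two-step `ready`/`send` flow that the documentation above recommends over `write_notification`; the protocol name and payload are placeholders:

```rust
// Illustrative: send one notification without risking the silent drop of
// `write_notification`. Assumes `net` implements `NetworkNotification`.
async fn send_one<N: NetworkNotification>(
	net: &N,
	target: PeerId,
	payload: Vec<u8>,
) -> Result<(), NotificationSenderError> {
	let sender = net.notification_sender(target, "/my-chain/announces/1".into())?;
	// Wait until the per-peer buffer has a free slot...
	let mut ready = sender.ready().await?;
	// ...then consume the reservation by queuing the payload.
	ready.send(payload)
}
```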
+
+/// Provides the ability to send network requests.
+#[async_trait::async_trait]
+pub trait NetworkRequest {
+	/// Sends a single targeted request to a specific peer. On success, returns the response of
+	/// the peer.
+	///
+	/// Request-response protocols are a way to complement notifications protocols, but
+	/// notifications should remain the default way of communicating information. For example, a
+	/// peer can announce something through a notification, after which the recipient can obtain
+	/// more information by performing a request.
+	/// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way
+	/// you will get an error immediately for disconnected peers, instead of waiting for a
+	/// potentially very long connection attempt, which would suggest that something is wrong
+	/// anyway, as you are supposed to be connected because of the notification protocol.
+	///
+	/// No limit or throttling of concurrent outbound requests per peer and protocol are enforced.
+	/// Such restrictions, if desired, need to be enforced at the call site(s).
+	///
+	/// The protocol must have been registered through
+	/// `NetworkConfiguration::request_response_protocols`.
+	async fn request(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		connect: IfDisconnected,
+	) -> Result<Vec<u8>, RequestFailure>;
+
+	/// Variation of `request` which starts a request whose response is delivered on a provided
+	/// channel.
+	///
+	/// Instead of blocking and waiting for a reply, this function returns immediately, sending
+	/// responses via the passed in sender. This alternative API exists to make it easier to
+	/// integrate with message passing APIs.
+	///
+	/// Keep in mind that the connected receiver might receive a `Canceled` event in case of a
+	/// closing connection. This is expected behaviour. With `request` you would get a
+	/// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case.
+	fn start_request(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		tx: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
+		connect: IfDisconnected,
+	);
+}
+
+// Manual implementation to avoid extra boxing here
+impl<T> NetworkRequest for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkRequest,
+{
+	fn request<'life0, 'async_trait>(
+		&'life0 self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		connect: IfDisconnected,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, RequestFailure>> + Send + 'async_trait>>
+	where
+		'life0: 'async_trait,
+		Self: 'async_trait,
+	{
+		T::request(self, target, protocol, request, connect)
+	}
+
+	fn start_request(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		tx: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
+		connect: IfDisconnected,
+	) {
+		T::start_request(self, target, protocol, request, tx, connect)
+	}
+}
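Following the guidance above, a sketch of issuing a request that fails fast for peers we are not connected to; the protocol name and payload are placeholders:

```rust
// Illustrative: ask a connected peer for data over a request-response
// protocol. `IfDisconnected::ImmediateError` avoids a long dial attempt.
async fn fetch<N: NetworkRequest>(
	net: &N,
	target: PeerId,
	payload: Vec<u8>,
) -> Result<Vec<u8>, RequestFailure> {
	net.request(
		target,
		"/my-chain/my-protocol/1".into(),
		payload,
		IfDisconnected::ImmediateError,
	)
	.await
}
```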
+
+/// Provides the ability to propagate transactions over the network.
+pub trait NetworkTransaction<H> {
+	/// You may call this when new transactions are imported by the transaction pool.
+	///
+	/// All transactions will be fetched from the `TransactionPool` that was passed at
+	/// initialization as part of the configuration and propagated to peers.
+	fn trigger_repropagate(&self);
+
+	/// You must call this when a new transaction is imported by the transaction pool.
+	///
+	/// This transaction will be fetched from the `TransactionPool` that was passed at
+	/// initialization as part of the configuration and propagated to peers.
+	fn propagate_transaction(&self, hash: H);
+}
+
+impl<T, H> NetworkTransaction<H> for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkTransaction<H>,
+{
+	fn trigger_repropagate(&self) {
+		T::trigger_repropagate(self)
+	}
+
+	fn propagate_transaction(&self, hash: H) {
+		T::propagate_transaction(self, hash)
+	}
+}
+
+/// Provides the ability to announce blocks to the network.
+pub trait NetworkBlock<BlockHash, BlockNumber> {
+	/// Make sure an important block is propagated to peers.
+	///
+	/// In chain-based consensus, we often need to make sure non-best forks are
+	/// at least temporarily synced. This function forces such an announcement.
+	fn announce_block(&self, hash: BlockHash, data: Option<Vec<u8>>);
+
+	/// Inform the network service about new best imported block.
+	fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber);
+}
+
+impl<T, BlockHash, BlockNumber> NetworkBlock<BlockHash, BlockNumber> for Arc<T>
+where
+	T: ?Sized,
+	T: NetworkBlock<BlockHash, BlockNumber>,
+{
+	fn announce_block(&self, hash: BlockHash, data: Option<Vec<u8>>) {
+		T::announce_block(self, hash, data)
+	}
+
+	fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber) {
+		T::new_best_block_imported(self, hash, number)
+	}
+}
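For illustration, a sketch of how consensus-side code might use `NetworkBlock` to force an announcement of a non-best fork block; the helper and concrete type parameters are placeholders:

```rust
// Illustrative: force-announce a block that is not on the best chain so
// lagging peers sync it; `None` attaches no extra announcement data.
fn announce_fork_block<N, H>(net: &N, hash: H)
where
	N: NetworkBlock<H, u32>,
{
	net.announce_block(hash, None);
}
```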
diff --git a/client/network/src/service/signature.rs b/client/network/common/src/service/signature.rs
similarity index 96%
rename from client/network/src/service/signature.rs
rename to client/network/common/src/service/signature.rs
index d21d28a3007b5..602ef3d82979a 100644
--- a/client/network/src/service/signature.rs
+++ b/client/network/common/src/service/signature.rs
@@ -18,7 +18,10 @@
 //
 // If you read this, you are very thorough, congratulations.
 
-use super::*;
+use libp2p::{
+	identity::{error::SigningError, Keypair, PublicKey},
+	PeerId,
+};
 
 /// A result of signing a message with a network identity. Since `PeerId` is potentially a hash of a
 /// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check
diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs
new file mode 100644
index 0000000000000..2ee8f8c51814b
--- /dev/null
+++ b/client/network/common/src/sync.rs
@@ -0,0 +1,394 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Abstract interfaces and data structures related to network sync.
+
+pub mod message;
+pub mod metrics;
+pub mod warp;
+
+use libp2p::PeerId;
+use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse};
+use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
+use sp_consensus::BlockOrigin;
+use sp_runtime::{
+	traits::{Block as BlockT, NumberFor},
+	Justifications,
+};
+use std::{any::Any, fmt, fmt::Formatter, task::Poll};
+use warp::{EncodedProof, WarpProofRequest, WarpSyncProgress};
+
+/// The sync status of a peer we are trying to sync with.
+#[derive(Debug)]
+pub struct PeerInfo<Block: BlockT> {
+	/// Their best block hash.
+	pub best_hash: Block::Hash,
+	/// Their best block number.
+	pub best_number: NumberFor<Block>,
+}
+
+/// Reported sync state.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub enum SyncState {
+	/// Initial sync is complete, keep-up sync is active.
+	Idle,
+	/// Actively catching up with the chain.
+	Downloading,
+}
+
+/// Reported state download progress.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct StateDownloadProgress {
+	/// Estimated download percentage.
+	pub percentage: u32,
+	/// Total state size in bytes downloaded so far.
+	pub size: u64,
+}
+
+/// Syncing status and statistics.
+#[derive(Clone)]
+pub struct SyncStatus<Block: BlockT> {
+	/// Current global sync state.
+	pub state: SyncState,
+	/// Target sync block number.
+	pub best_seen_block: Option<NumberFor<Block>>,
+	/// Number of peers participating in syncing.
+	pub num_peers: u32,
+	/// Number of blocks queued for import.
+	pub queued_blocks: u32,
+	/// State sync status in progress, if any.
+	pub state_sync: Option<StateDownloadProgress>,
+	/// Warp sync in progress, if any.
+	pub warp_sync: Option<WarpSyncProgress<Block>>,
+}
+
+/// A peer did not behave as expected and should be reported.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange);
+
+impl fmt::Display for BadPeer {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1)
+	}
+}
+
+impl std::error::Error for BadPeer {}
+
+/// Result of [`ChainSync::on_block_data`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockData<Block: BlockT> {
+	/// The block should be imported.
+	Import(BlockOrigin, Vec<IncomingBlock<Block>>),
+	/// A new block request needs to be made to the given peer.
+	Request(PeerId, BlockRequest<Block>),
+}
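A sketch of how a caller might summarize `SyncStatus`, e.g. for an informant log line; the wording is illustrative:

```rust
// Illustrative: render a one-line summary of the sync status.
fn sync_summary<B: BlockT>(status: &SyncStatus<B>) -> String {
	match status.state {
		SyncState::Idle => format!("idle ({} peers)", status.num_peers),
		SyncState::Downloading => format!(
			"syncing towards {:?} ({} peers, {} blocks queued)",
			status.best_seen_block, status.num_peers, status.queued_blocks,
		),
	}
}
```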
+
+/// Result of [`ChainSync::on_block_justification`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockJustification<Block: BlockT> {
+	/// The justification needs no further handling.
+	Nothing,
+	/// The justification should be imported.
+	Import {
+		peer: PeerId,
+		hash: Block::Hash,
+		number: NumberFor<Block>,
+		justifications: Justifications,
+	},
+}
+
+/// Result of [`ChainSync::on_state_data`].
+#[derive(Debug)]
+pub enum OnStateData<Block: BlockT> {
+	/// The block and state that should be imported.
+	Import(BlockOrigin, IncomingBlock<Block>),
+	/// A new state request needs to be made to the given peer.
+	Continue,
+}
+
+/// Result of [`ChainSync::poll_block_announce_validation`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum PollBlockAnnounceValidation<H> {
+	/// The announcement failed at validation.
+	///
+	/// The peer reputation should be decreased.
+	Failure {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Should the peer be disconnected?
+		disconnect: bool,
+	},
+	/// The announcement does not require further handling.
+	Nothing {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Was this their new best block?
+		is_best: bool,
+		/// The announcement.
+		announce: BlockAnnounce<H>,
+	},
+	/// The announcement header should be imported.
+	ImportHeader {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Was this their new best block?
+		is_best: bool,
+		/// The announcement.
+		announce: BlockAnnounce<H>,
+	},
+	/// The block announcement should be skipped.
+	Skip,
+}
+
+/// Operation mode.
+#[derive(Debug, PartialEq, Eq)]
+pub enum SyncMode {
+	/// Sync headers only.
+	Light,
+	/// Sync headers and block bodies.
+	Full,
+	/// Sync headers and the last finalized state.
+	LightState { storage_chain_mode: bool, skip_proofs: bool },
+	/// Warp sync mode.
+	Warp,
+}
+
+#[derive(Debug)]
+pub struct Metrics {
+	pub queued_blocks: u32,
+	pub fork_targets: u32,
+	pub justifications: metrics::Metrics,
+}
+
+/// Wrapper for implementation-specific state request.
+///
+/// NOTE: The implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueStateRequest(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueStateRequest {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueStateRequest").finish()
+	}
+}
+
+/// Wrapper for implementation-specific state response.
+///
+/// NOTE: The implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueStateResponse(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueStateResponse {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueStateResponse").finish()
+	}
+}
+
+/// Wrapper for implementation-specific block request.
+///
+/// NOTE: The implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueBlockRequest(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueBlockRequest {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueBlockRequest").finish()
+	}
+}
+
+/// Wrapper for implementation-specific block response.
+///
+/// NOTE: The implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueBlockResponse(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueBlockResponse {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueBlockResponse").finish()
+	}
+}
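The `Opaque*` wrappers above rely on `Any` downcasting at the boundary between this abstract interface and a concrete implementation. A sketch of the implementor's side, with a hypothetical `MyBlockRequest` standing in for the real request type:

```rust
// Illustrative: an implementation recovering its concrete request type from
// the opaque wrapper before encoding it. `MyBlockRequest` is a stand-in.
struct MyBlockRequest {
	from_block: u64,
}

fn encode_block_request(request: &OpaqueBlockRequest) -> Result<Vec<u8>, String> {
	let request: &MyBlockRequest = request
		.0
		.downcast_ref()
		.ok_or_else(|| "Failed to downcast opaque block request".to_string())?;
	// A real implementation would SCALE/protobuf-encode the concrete type.
	Ok(request.from_block.to_le_bytes().to_vec())
}
```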
+
+/// Something that represents the syncing strategy to download past and future blocks of the chain.
+pub trait ChainSync<Block: BlockT>: Send {
+	/// Returns the state of the sync of the given peer.
+	///
+	/// Returns `None` if the peer is unknown.
+	fn peer_info(&self, who: &PeerId) -> Option<PeerInfo<Block>>;
+
+	/// Returns the current sync status.
+	fn status(&self) -> SyncStatus<Block>;
+
+	/// Number of active fork requests. This includes
+	/// requests that are pending or could be issued right away.
+	fn num_sync_requests(&self) -> usize;
+
+	/// Number of downloaded blocks.
+	fn num_downloaded_blocks(&self) -> usize;
+
+	/// Returns the current number of peers stored within this state machine.
+	fn num_peers(&self) -> usize;
+
+	/// Handle a new connected peer.
+	///
+	/// Call this method whenever we connect to a new peer.
+	fn new_peer(
+		&mut self,
+		who: PeerId,
+		best_hash: Block::Hash,
+		best_number: NumberFor<Block>,
+	) -> Result<Option<BlockRequest<Block>>, BadPeer>;
+
+	/// Signal that a new best block has been imported.
+	fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor<Block>);
+
+	/// Schedule a justification request for the given block.
+	fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor<Block>);
+
+	/// Clear all pending justification requests.
+	fn clear_justification_requests(&mut self);
+
+	/// Request syncing for the given block from given set of peers.
+	fn set_sync_fork_request(
+		&mut self,
+		peers: Vec<PeerId>,
+		hash: &Block::Hash,
+		number: NumberFor<Block>,
+	);
+
+	/// Get an iterator over all scheduled justification requests.
+	fn justification_requests(
+		&mut self,
+	) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<Block>)> + '_>;
+
+	/// Get an iterator over all block requests of all peers.
+	fn block_requests(&mut self) -> Box<dyn Iterator<Item = (&PeerId, BlockRequest<Block>)> + '_>;
+
+	/// Get a state request, if any.
+	fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>;
+
+	/// Get a warp sync request, if any.
+	fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest<Block>)>;
+
+	/// Handle a response from the remote to a block request that we made.
+	///
+	/// `request` must be the original request that triggered `response`,
+	/// or `None` if the data comes from a block announcement.
+	///
+	/// If this corresponds to a valid block, this outputs the block that
+	/// must be imported in the import queue.
+	fn on_block_data(
+		&mut self,
+		who: &PeerId,
+		request: Option<BlockRequest<Block>>,
+		response: BlockResponse<Block>,
+	) -> Result<OnBlockData<Block>, BadPeer>;
+
+	/// Handle a response from the remote to a state request that we made.
+	fn on_state_data(
+		&mut self,
+		who: &PeerId,
+		response: OpaqueStateResponse,
+	) -> Result<OnStateData<Block>, BadPeer>;
+
+	/// Handle a response from the remote to a warp proof request that we made.
+	fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer>;
+
+	/// Handle a response from the remote to a justification request that we made.
+	///
+	/// `request` must be the original request that triggered `response`.
+	fn on_block_justification(
+		&mut self,
+		who: PeerId,
+		response: BlockResponse<Block>,
+	) -> Result<OnBlockJustification<Block>, BadPeer>;
+
+	/// A batch of blocks have been processed, with or without errors.
+	///
+	/// Call this when a batch of blocks have been processed by the import
+	/// queue, with or without errors.
+	fn on_blocks_processed(
+		&mut self,
+		imported: usize,
+		count: usize,
+		results: Vec<(Result<BlockImportStatus<NumberFor<Block>>, BlockImportError>, Block::Hash)>,
+	) -> Box<dyn Iterator<Item = Result<(PeerId, BlockRequest<Block>), BadPeer>>>;
+
+	/// Call this when a justification has been processed by the import queue,
+	/// with or without errors.
+	fn on_justification_import(
+		&mut self,
+		hash: Block::Hash,
+		number: NumberFor<Block>,
+		success: bool,
+	);
+
+	/// Notify about finalization of the given block.
+	fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor<Block>);
+
+	/// Push a block announce validation.
+	///
+	/// It is required that [`ChainSync::poll_block_announce_validation`] is called
+	/// to check for finished block announce validations.
+	fn push_block_announce_validation(
+		&mut self,
+		who: PeerId,
+		hash: Block::Hash,
+		announce: BlockAnnounce<Block::Header>,
+		is_best: bool,
+	);
+
+	/// Poll block announce validation.
+	///
+	/// Block announce validations can be pushed by using
+	/// [`ChainSync::push_block_announce_validation`].
+	///
+	/// This should be polled until it returns [`Poll::Pending`].
+	///
+	/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to
+	/// import the passed header (call `on_block_data`). The network request isn't sent in this
+	/// case.
+	fn poll_block_announce_validation(
+		&mut self,
+		cx: &mut std::task::Context,
+	) -> Poll<PollBlockAnnounceValidation<Block::Header>>;
+
+	/// Call when a peer has disconnected.
+	/// Cancelled obsolete block requests may result in some blocks being ready for
+	/// import, so this function checks for such blocks and returns them.
+	fn peer_disconnected(&mut self, who: &PeerId) -> Option<OnBlockData<Block>>;
+
+	/// Return some key metrics.
+	fn metrics(&self) -> Metrics;
+
+	/// Create an implementation-specific block request.
+	fn create_opaque_block_request(&self, request: &BlockRequest<Block>) -> OpaqueBlockRequest;
+
+	/// Encode an implementation-specific block request.
+	fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result<Vec<u8>, String>;
+
+	/// Decode an implementation-specific block response.
+	fn decode_block_response(&self, response: &[u8]) -> Result<OpaqueBlockResponse, String>;
+
+	/// Access blocks from an implementation-specific block response.
+	fn block_response_into_blocks(
+		&self,
+		request: &BlockRequest<Block>,
+		response: OpaqueBlockResponse,
+	) -> Result<Vec<BlockData<Block>>, String>;
+
+	/// Encode an implementation-specific state request.
+	fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result<Vec<u8>, String>;
+
+	/// Decode an implementation-specific state response.
+	fn decode_state_response(&self, response: &[u8]) -> Result<OpaqueStateResponse, String>;
+}
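A sketch of the poll loop implied by the `poll_block_announce_validation` contract, draining finished validations until `Pending`; reputation and import handling are elided:

```rust
use std::task::{Context, Poll};

// Illustrative: in real code this runs inside the protocol's `poll`.
fn drain_validations<B: BlockT>(sync: &mut dyn ChainSync<B>, cx: &mut Context<'_>) {
	while let Poll::Ready(validation) = sync.poll_block_announce_validation(cx) {
		match validation {
			PollBlockAnnounceValidation::ImportHeader { who, announce, .. } => {
				// The caller MUST try to import the header, e.g. via `on_block_data`.
				let _ = (who, announce);
			},
			PollBlockAnnounceValidation::Failure { who, disconnect } => {
				// Decrease reputation and optionally disconnect `who`.
				let _ = (who, disconnect);
			},
			PollBlockAnnounceValidation::Nothing { .. } | PollBlockAnnounceValidation::Skip => {},
		}
	}
}
```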
diff --git a/client/network/sync/src/message.rs b/client/network/common/src/sync/message.rs
similarity index 99%
rename from client/network/sync/src/message.rs
rename to client/network/common/src/sync/message.rs
index 996ee5231cf2e..27ab2704e6471 100644
--- a/client/network/sync/src/message.rs
+++ b/client/network/common/src/sync/message.rs
@@ -124,8 +124,8 @@
 /// Generic types.
 pub mod generic {
 	use super::{BlockAttributes, BlockState, Direction};
+	use crate::message::RequestId;
 	use codec::{Decode, Encode, Input, Output};
-	use sc_network_common::message::RequestId;
 	use sp_runtime::{EncodedJustification, Justifications};
 
 	/// Block data sent in the response.
diff --git a/client/network/common/src/sync/metrics.rs b/client/network/common/src/sync/metrics.rs
new file mode 100644
index 0000000000000..15ff090a8ccac
--- /dev/null
+++ b/client/network/common/src/sync/metrics.rs
@@ -0,0 +1,25 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+#[derive(Debug)]
+pub struct Metrics {
+	pub pending_requests: u32,
+	pub active_requests: u32,
+	pub importing_requests: u32,
+	pub failed_requests: u32,
+}
diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs
new file mode 100644
index 0000000000000..339a4c33a7eeb
--- /dev/null
+++ b/client/network/common/src/sync/warp.rs
@@ -0,0 +1,94 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+use codec::{Decode, Encode};
+pub use sp_finality_grandpa::{AuthorityList, SetId};
+use sp_runtime::traits::{Block as BlockT, NumberFor};
+use std::fmt;
+
+/// Scale-encoded warp sync proof response.
+pub struct EncodedProof(pub Vec<u8>);
+
+/// Warp sync request.
+#[derive(Encode, Decode, Debug)]
+pub struct WarpProofRequest<B: BlockT> {
+	/// Start collecting proofs from this block.
+	pub begin: B::Hash,
+}
+
+/// Proof verification result.
+pub enum VerificationResult<Block: BlockT> {
+	/// Proof is valid, but the target was not reached.
+	Partial(SetId, AuthorityList, Block::Hash),
+	/// Target finality is proved.
+	Complete(SetId, AuthorityList, Block::Header),
+}
+
+/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
+pub trait WarpSyncProvider<Block: BlockT>: Send + Sync {
+	/// Generate a proof starting at the given block hash. The proof is accumulated until the
+	/// maximum proof size is reached.
+	fn generate(
+		&self,
+		start: Block::Hash,
+	) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>;
+	/// Verify a warp proof against the current set of authorities.
+	fn verify(
+		&self,
+		proof: &EncodedProof,
+		set_id: SetId,
+		authorities: AuthorityList,
+	) -> Result<VerificationResult<Block>, Box<dyn std::error::Error + Send + Sync>>;
+	/// Get the current list of authorities. This is supposed to be the genesis authorities when
+	/// starting sync.
+	fn current_authorities(&self) -> AuthorityList;
+}
+
+/// Reported warp sync phase.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub enum WarpSyncPhase<Block: BlockT> {
+	/// Waiting for peers to connect.
+	AwaitingPeers,
+	/// Downloading and verifying grandpa warp proofs.
+	DownloadingWarpProofs,
+	/// Downloading state data.
+	DownloadingState,
+	/// Importing state.
+	ImportingState,
+	/// Downloading block history.
+	DownloadingBlocks(NumberFor<Block>),
+}
+
+impl<Block: BlockT> fmt::Display for WarpSyncPhase<Block> {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match self {
+			Self::AwaitingPeers => write!(f, "Waiting for peers"),
+			Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"),
+			Self::DownloadingState => write!(f, "Downloading state"),
+			Self::ImportingState => write!(f, "Importing state"),
+			Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n),
+		}
+	}
+}
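To make the provider contract concrete, a minimal stub implementation; a real provider (such as GRANDPA's) would actually accumulate and verify finality proofs:

```rust
// Illustrative stub: a provider that never produces proofs. It only shows
// which trait items an implementor must supply.
struct NoopWarpSync {
	genesis_authorities: AuthorityList,
}

impl<Block: BlockT> WarpSyncProvider<Block> for NoopWarpSync {
	fn generate(
		&self,
		_start: Block::Hash,
	) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>> {
		Err("proof generation not supported in this stub".into())
	}

	fn verify(
		&self,
		_proof: &EncodedProof,
		set_id: SetId,
		authorities: AuthorityList,
	) -> Result<VerificationResult<Block>, Box<dyn std::error::Error + Send + Sync>> {
		// A real implementation checks the proof against `authorities` and
		// reports either partial progress or completion.
		let _ = (set_id, authorities);
		Err("verification not supported in this stub".into())
	}

	fn current_authorities(&self) -> AuthorityList {
		self.genesis_authorities.clone()
	}
}
```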
+
+/// Reported warp sync progress.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct WarpSyncProgress<Block: BlockT> {
+	/// Reported warp sync phase.
+	pub phase: WarpSyncPhase<Block>,
+	/// Total bytes downloaded so far.
+	pub total_bytes: u64,
+}
diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml
index 924b8ed06ab0a..c1a0fb4759320 100644
--- a/client/network/light/Cargo.toml
+++ b/client/network/light/Cargo.toml
@@ -21,7 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [
   "derive",
 ] }
 futures = "0.3.21"
-libp2p = "0.45.1"
+hex = "0.4.0"
+libp2p = "0.46.1"
 log = "0.4.16"
 prost = "0.10"
 sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
diff --git a/client/network/light/src/light_client_requests.rs b/client/network/light/src/light_client_requests.rs
index 9eccef41e833d..b58426cf15992 100644
--- a/client/network/light/src/light_client_requests.rs
+++ b/client/network/light/src/light_client_requests.rs
@@ -25,16 +25,31 @@
 use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig};
 
 use std::time::Duration;
 
-/// Generate the light client protocol name from chain specific protocol identifier.
-fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
+/// Generate the light client protocol name from the genesis hash and fork id.
+fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
+	if let Some(fork_id) = fork_id {
+		format!("/{}/{}/light/2", hex::encode(genesis_hash), fork_id)
+	} else {
+		format!("/{}/light/2", hex::encode(genesis_hash))
+	}
+}
+
+/// Generate the legacy light client protocol name from the chain-specific protocol identifier.
+fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String {
 	format!("/{}/light/2", protocol_id.as_ref())
 }
 
 /// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming
 /// requests.
-pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
+pub fn generate_protocol_config<Hash: AsRef<[u8]>>(
+	protocol_id: &ProtocolId,
+	genesis_hash: Hash,
+	fork_id: Option<&str>,
+) -> ProtocolConfig {
 	ProtocolConfig {
-		name: generate_protocol_name(protocol_id).into(),
+		name: generate_protocol_name(genesis_hash, fork_id).into(),
+		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
+			.collect(),
 		max_request_size: 1 * 1024 * 1024,
 		max_response_size: 16 * 1024 * 1024,
 		request_timeout: Duration::from_secs(15),
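The new naming scheme is easy to check by hand. A sketch computing both the modern and legacy names under a made-up genesis hash; the same genesis-hash-based pattern is applied to the block-announces protocol later in this diff:

```rust
// Illustrative: what the two protocol names look like for a made-up chain.
fn demo_protocol_names() {
	let genesis_hash: [u8; 4] = [0xde, 0xad, 0xbe, 0xef]; // stand-in for a real 32-byte hash
	let fork_id = Some("fork-1");

	let modern = match fork_id {
		Some(fork_id) => format!("/{}/{}/light/2", hex::encode(genesis_hash), fork_id),
		None => format!("/{}/light/2", hex::encode(genesis_hash)),
	};
	assert_eq!(modern, "/deadbeef/fork-1/light/2");

	// The ProtocolId-based name stays available as a fallback.
	let legacy = format!("/{}/light/2", "dot");
	assert_eq!(legacy, "/dot/light/2");
}
```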
diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs
index 3c87ccfd6ed9f..727a9b0d7e820 100644
--- a/client/network/light/src/light_client_requests/handler.rs
+++ b/client/network/light/src/light_client_requests/handler.rs
@@ -28,7 +28,7 @@
 use futures::{channel::mpsc, prelude::*};
 use libp2p::PeerId;
 use log::{debug, trace};
 use prost::Message;
-use sc_client_api::{ProofProvider, StorageProof};
+use sc_client_api::{BlockBackend, ProofProvider, StorageProof};
 use sc_network_common::{
 	config::ProtocolId,
 	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
@@ -54,15 +54,27 @@ pub struct LightClientRequestHandler<B, Client> {
 impl<B, Client> LightClientRequestHandler<B, Client>
 where
 	B: Block,
-	Client: ProofProvider<B> + Send + Sync + 'static,
+	Client: BlockBackend<B> + ProofProvider<B> + Send + Sync + 'static,
 {
 	/// Create a new [`LightClientRequestHandler`].
-	pub fn new(protocol_id: &ProtocolId, client: Arc<Client>) -> (Self, ProtocolConfig) {
+	pub fn new(
+		protocol_id: &ProtocolId,
+		fork_id: Option<&str>,
+		client: Arc<Client>,
+	) -> (Self, ProtocolConfig) {
 		// For now due to lack of data on light client request handling in production systems, this
 		// value is chosen to match the block request limit.
 		let (tx, request_receiver) = mpsc::channel(20);
 
-		let mut protocol_config = super::generate_protocol_config(protocol_id);
+		let mut protocol_config = super::generate_protocol_config(
+			protocol_id,
+			client
+				.block_hash(0u32.into())
+				.ok()
+				.flatten()
+				.expect("Genesis block exists; qed"),
+			fork_id,
+		);
 		protocol_config.inbound_queue = Some(tx);
 
 		(Self { client, request_receiver, _block: PhantomData::default() }, protocol_config)
diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs
index 091dd116e4c9c..4ff415788f4ea 100644
--- a/client/network/src/behaviour.rs
+++ b/client/network/src/behaviour.rs
@@ -21,7 +21,7 @@ use crate::{
 	discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut},
 	peer_info,
 	protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol},
-	request_responses, DhtEvent, ObservedRole,
+	request_responses,
 };
 
 use bytes::Bytes;
@@ -38,10 +38,14 @@ use libp2p::{
 	NetworkBehaviour,
 };
 use log::debug;
-use prost::Message;
+
 use sc_client_api::{BlockBackend, ProofProvider};
 use sc_consensus::import_queue::{IncomingBlock, Origin};
-use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig};
+use sc_network_common::{
+	config::ProtocolId,
+	protocol::event::{DhtEvent, ObservedRole},
+	request_responses::{IfDisconnected, ProtocolConfig, RequestFailure},
+};
 use sc_peerset::PeersetHandle;
 use sp_blockchain::{HeaderBackend, HeaderMetadata};
 use sp_consensus::BlockOrigin;
@@ -57,9 +61,7 @@ use std::{
 	time::Duration,
 };
 
-pub use crate::request_responses::{
-	IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure,
-};
+pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure};
 
 /// General behaviour of the network. Combines all protocols together.
#[derive(NetworkBehaviour)] @@ -382,42 +384,44 @@ where .events .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), CustomMessageOutcome::BlockRequest { target, request, pending_response } => { - let mut buf = Vec::with_capacity(request.encoded_len()); - if let Err(err) = request.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - request, err - ); - return + match self.substrate.encode_block_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.block_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + }, } - - self.request_responses.send_request( - &target, - &self.block_request_protocol_name, - buf, - pending_response, - IfDisconnected::ImmediateError, - ); }, CustomMessageOutcome::StateRequest { target, request, pending_response } => { - let mut buf = Vec::with_capacity(request.encoded_len()); - if let Err(err) = request.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode state request {:?}: {:?}", - request, err - ); - return + match self.substrate.encode_state_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.state_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + }, } - - self.request_responses.send_request( - &target, - &self.state_request_protocol_name, - buf, - pending_response, - IfDisconnected::ImmediateError, - ); }, CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => match &self.warp_sync_protocol_name { diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs index d5039faaca113..2dab45adc5618 100644 --- a/client/network/src/bitswap.rs +++ b/client/network/src/bitswap.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. +// Copyright 2022 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify @@ -118,8 +118,7 @@ where fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { Box::pin(async move { - let mut data = Vec::with_capacity(self.encoded_len()); - self.encode(&mut data)?; + let data = self.encode_to_vec(); upgrade::write_length_prefixed(&mut socket, data).await }) } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index e44977e5be6b3..52fa28e76e207 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -26,27 +26,22 @@ pub use sc_network_common::{ request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + sync::warp::WarpSyncProvider, }; -pub use sc_network_sync::warp_request_handler::WarpSyncProvider; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -// Note: this re-export shouldn't be part of the public API of the crate and will be removed in -// the future. 
-#[doc(hidden)]
-pub use crate::protocol::ProtocolConfig;
-
 use crate::ExHashT;
 
 use core::{fmt, iter};
 use futures::future;
 use libp2p::{
 	identity::{ed25519, Keypair},
-	multiaddr, Multiaddr, PeerId,
+	multiaddr, Multiaddr,
 };
 use prometheus_endpoint::Registry;
 use sc_consensus::ImportQueue;
-use sp_consensus::block_validation::BlockAnnounceValidator;
+use sc_network_common::{config::MultiaddrWithPeerId, sync::ChainSync};
 use sp_runtime::traits::Block as BlockT;
 use std::{
 	borrow::Cow,
@@ -59,7 +54,6 @@ use std::{
 	path::{Path, PathBuf},
 	pin::Pin,
 	str,
-	str::FromStr,
 	sync::Arc,
 };
 use zeroize::Zeroize;
@@ -92,17 +86,21 @@ where
 	/// the network.
 	pub transaction_pool: Arc<dyn TransactionPool<H, B>>,
 
-	/// Name of the protocol to use on the wire. Should be different for each chain.
+	/// Legacy name of the protocol to use on the wire. Should be different for each chain.
 	pub protocol_id: ProtocolId,
 
+	/// Fork ID to distinguish protocols of different hard forks. Part of the standard protocol
+	/// name on the wire.
+	pub fork_id: Option<String>,
+
 	/// Import queue to use.
 	///
 	/// The import queue is the component that verifies that blocks received from other nodes are
 	/// valid.
 	pub import_queue: Box<dyn ImportQueue<B>>,
 
-	/// Type to check incoming block announcements.
-	pub block_announce_validator: Box<dyn BlockAnnounceValidator<B> + Send>,
+	/// Instance of chain sync implementation.
+	pub chain_sync: Box<dyn ChainSync<B>>,
 
 	/// Registry for recording prometheus metrics to.
 	pub metrics_registry: Option<Registry>,
@@ -114,31 +112,31 @@ where
 	/// block requests, if enabled.
 	///
 	/// Can be constructed either via
-	/// [`sc_network_sync::block_request_handler::generate_protocol_config`] allowing outgoing but
-	/// not incoming requests, or constructed via [`sc_network_sync::block_request_handler::
-	/// BlockRequestHandler::new`] allowing both outgoing and incoming requests.
+	/// `sc_network_sync::block_request_handler::generate_protocol_config` allowing outgoing but
+	/// not incoming requests, or constructed via `sc_network_sync::block_request_handler::
+	/// BlockRequestHandler::new` allowing both outgoing and incoming requests.
 	pub block_request_protocol_config: RequestResponseConfig,
 
 	/// Request response configuration for the light client request protocol.
 	///
 	/// Can be constructed either via
-	/// [`sc_network_light::light_client_requests::generate_protocol_config`] allowing outgoing but
+	/// `sc_network_light::light_client_requests::generate_protocol_config` allowing outgoing but
 	/// not incoming requests, or constructed via
-	/// [`sc_network_light::light_client_requests::handler::LightClientRequestHandler::new`]
+	/// `sc_network_light::light_client_requests::handler::LightClientRequestHandler::new`
 	/// allowing both outgoing and incoming requests.
 	pub light_client_request_protocol_config: RequestResponseConfig,
 
 	/// Request response configuration for the state request protocol.
 	///
 	/// Can be constructed either via
-	/// [`sc_network_sync::block_request_handler::generate_protocol_config`] allowing outgoing but
+	/// `sc_network_sync::state_request_handler::generate_protocol_config` allowing outgoing but
 	/// not incoming requests, or constructed via
-	/// [`crate::state_request_handler::StateRequestHandler::new`] allowing
+	/// `sc_network_sync::state_request_handler::StateRequestHandler::new` allowing
 	/// both outgoing and incoming requests.
 	pub state_request_protocol_config: RequestResponseConfig,
 
-	/// Optional warp sync protocol support. Include protocol config and sync provider.
-	pub warp_sync: Option<(Arc<dyn WarpSyncProvider<B>>, RequestResponseConfig)>,
+	/// Optional warp sync protocol config.
+	pub warp_sync_protocol_config: Option<RequestResponseConfig>,
 }
 
 /// Role of the local node.
@@ -146,8 +144,6 @@ pub enum Role {
 	/// Regular full node.
 	Full,
-	/// Regular light node.
-	Light,
 	/// Actual authority.
 	Authority,
 }
@@ -157,18 +153,12 @@ impl Role {
 	pub fn is_authority(&self) -> bool {
 		matches!(self, Self::Authority { .. })
 	}
-
-	/// True for [`Role::Light`].
-	pub fn is_light(&self) -> bool {
-		matches!(self, Self::Light { .. })
-	}
 }
 
 impl fmt::Display for Role {
 	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 		match self {
 			Self::Full => write!(f, "FULL"),
-			Self::Light => write!(f, "LIGHT"),
 			Self::Authority { .. } => write!(f, "AUTHORITY"),
 		}
 	}
 }
@@ -234,132 +224,8 @@ impl<H: ExHashT, B: BlockT> TransactionPool<H, B> for EmptyTransactionPool {
 	}
 }
 
-/// Parses a string address and splits it into Multiaddress and PeerId, if
-/// valid.
-///
-/// # Example
-///
-/// ```
-/// # use sc_network::{Multiaddr, PeerId, config::parse_str_addr};
-/// let (peer_id, addr) = parse_str_addr(
-/// 	"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"
-/// ).unwrap();
-/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap());
-/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::<Multiaddr>().unwrap());
-/// ```
-pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> {
-	let addr: Multiaddr = addr_str.parse()?;
-	parse_addr(addr)
-}
-
-/// Splits a Multiaddress into a Multiaddress and PeerId.
-pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> {
-	let who = match addr.pop() {
-		Some(multiaddr::Protocol::P2p(key)) =>
-			PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?,
-		_ => return Err(ParseErr::PeerIdMissing),
-	};
-
-	Ok((who, addr))
-}
-
-/// Address of a node, including its identity.
-///
-/// This struct represents a decoded version of a multiaddress that ends with `/p2p/<peerid>`.
-///
-/// # Example
-///
-/// ```
-/// # use sc_network::{Multiaddr, PeerId, config::MultiaddrWithPeerId};
-/// let addr: MultiaddrWithPeerId =
-/// 	"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap();
-/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV");
-/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333");
-/// ```
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)]
-#[serde(try_from = "String", into = "String")]
-pub struct MultiaddrWithPeerId {
-	/// Address of the node.
-	pub multiaddr: Multiaddr,
-	/// Its identity.
-	pub peer_id: PeerId,
-}
-
-impl MultiaddrWithPeerId {
-	/// Concatenates the multiaddress and peer ID into one multiaddress containing both.
-	pub fn concat(&self) -> Multiaddr {
-		let proto = multiaddr::Protocol::P2p(From::from(self.peer_id));
-		self.multiaddr.clone().with(proto)
-	}
-}
-
-impl fmt::Display for MultiaddrWithPeerId {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		fmt::Display::fmt(&self.concat(), f)
-	}
-}
-
-impl FromStr for MultiaddrWithPeerId {
-	type Err = ParseErr;
-
-	fn from_str(s: &str) -> Result<Self, Self::Err> {
-		let (peer_id, multiaddr) = parse_str_addr(s)?;
-		Ok(Self { peer_id, multiaddr })
-	}
-}
-
-impl From<MultiaddrWithPeerId> for String {
-	fn from(ma: MultiaddrWithPeerId) -> String {
-		format!("{}", ma)
-	}
-}
-
-impl TryFrom<String> for MultiaddrWithPeerId {
-	type Error = ParseErr;
-	fn try_from(string: String) -> Result<Self, Self::Error> {
-		string.parse()
-	}
-}
-
-/// Error that can be generated by `parse_str_addr`.
-#[derive(Debug)]
-pub enum ParseErr {
-	/// Error while parsing the multiaddress.
-	MultiaddrParse(multiaddr::Error),
-	/// Multihash of the peer ID is invalid.
-	InvalidPeerId,
-	/// The peer ID is missing from the address.
-	PeerIdMissing,
-}
-
-impl fmt::Display for ParseErr {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		match self {
-			Self::MultiaddrParse(err) => write!(f, "{}", err),
-			Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"),
-			Self::PeerIdMissing => write!(f, "Peer id is missing from the address"),
-		}
-	}
-}
-
-impl std::error::Error for ParseErr {
-	fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-		match self {
-			Self::MultiaddrParse(err) => Some(err),
-			Self::InvalidPeerId => None,
-			Self::PeerIdMissing => None,
-		}
-	}
-}
-
-impl From<multiaddr::Error> for ParseErr {
-	fn from(err: multiaddr::Error) -> ParseErr {
-		Self::MultiaddrParse(err)
-	}
-}
-
 /// Sync operation mode.
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum SyncMode {
 	/// Full block download and verification.
 	Full,
diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs
index 2bae2fb807f7e..ab93662968dc2 100644
--- a/client/network/src/discovery.rs
+++ b/client/network/src/discovery.rs
@@ -52,8 +52,8 @@ use futures_timer::Delay;
 use ip_network::IpNetwork;
 use libp2p::{
 	core::{
-		connection::{ConnectionId, ListenerId},
-		ConnectedPoint, Multiaddr, PeerId, PublicKey,
+		connection::ConnectionId, transport::ListenerId, ConnectedPoint, Multiaddr, PeerId,
+		PublicKey,
 	},
 	kad::{
 		handler::KademliaHandlerProto,
@@ -1030,7 +1030,7 @@ mod tests {
 		let noise_keys =
 			noise::Keypair::<noise::X25519Spec>::new().into_authentic(&keypair).unwrap();
 
-		let transport = MemoryTransport
+		let transport = MemoryTransport::new()
 			.upgrade(upgrade::Version::V1)
 			.authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated())
 			.multiplex(yamux::YamuxConfig::default())
@@ -1069,7 +1069,7 @@ mod tests {
 					// Skip the first swarm as all other swarms already know it.
					.skip(1)
 					.filter(|p| *p != n)
-					.map(|p| Swarm::local_peer_id(&swarms[p].0).clone())
+					.map(|p| *Swarm::local_peer_id(&swarms[p].0))
 					.collect::<HashSet<_>>()
 			})
 			.collect::<Vec<_>>();
diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs
index fff30550eb8c9..b1d70c28289bd 100644
--- a/client/network/src/lib.rs
+++ b/client/network/src/lib.rs
@@ -262,26 +262,26 @@ pub mod transactions;
 
 #[doc(inline)]
 pub use libp2p::{multiaddr, Multiaddr, PeerId};
-pub use protocol::{
-	event::{DhtEvent, Event, ObservedRole},
-	PeerInfo,
-};
-pub use sc_network_light::light_client_requests;
-pub use sc_network_sync::{
-	block_request_handler,
-	state::StateDownloadProgress,
-	state_request_handler,
-	warp::{WarpSyncPhase, WarpSyncProgress},
-	warp_request_handler, SyncState,
+pub use protocol::PeerInfo;
+pub use sc_network_common::{
+	protocol::event::{DhtEvent, Event, ObservedRole},
+	request_responses::{IfDisconnected, RequestFailure},
+	service::{
+		KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner,
+		NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest,
+		NetworkTransaction, Signature, SigningError,
+	},
+	sync::{
+		warp::{WarpSyncPhase, WarpSyncProgress},
+		StateDownloadProgress, SyncState,
+	},
 };
 pub use service::{
-	DecodingError, IfDisconnected, KademliaKey, Keypair, NetworkService, NetworkWorker,
-	NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, RequestFailure,
-	Signature, SigningError,
+	DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender,
+	NotificationSenderReady, OutboundFailure, PublicKey,
 };
 pub use sc_peerset::ReputationChange;
-use sp_runtime::traits::{Block as BlockT, NumberFor};
 
 /// The maximum allowed number of established connections per peer.
 ///
@@ -300,35 +300,3 @@ pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync
 impl<T> ExHashT for T where
 	T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static
 {
 }
-
-/// Trait for providing information about the local network state
-pub trait NetworkStateInfo {
-	/// Returns the local external addresses.
-	fn external_addresses(&self) -> Vec<Multiaddr>;
-
-	/// Returns the local Peer ID.
-	fn local_peer_id(&self) -> PeerId;
-}
-
-/// Overview status of the network.
-#[derive(Clone)]
-pub struct NetworkStatus<B: BlockT> {
-	/// Current global sync state.
-	pub sync_state: SyncState,
-	/// Target sync block number.
-	pub best_seen_block: Option<NumberFor<B>>,
-	/// Number of peers participating in syncing.
-	pub num_sync_peers: u32,
-	/// Total number of connected peers
-	pub num_connected_peers: usize,
-	/// Total number of active peers.
-	pub num_active_peers: usize,
-	/// The total number of bytes received.
-	pub total_bytes_inbound: u64,
-	/// The total number of bytes sent.
-	pub total_bytes_outbound: u64,
-	/// State sync in progress.
-	pub state_sync: Option<StateDownloadProgress>,
-	/// Warp sync in progress.
-	pub warp_sync: Option<WarpSyncProgress<B>>,
-}
diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs
index c3e79437bdd06..d668cb25ea455 100644
--- a/client/network/src/peer_info.rs
+++ b/client/network/src/peer_info.rs
@@ -21,9 +21,8 @@ use fnv::FnvHashMap;
 use futures::prelude::*;
 use libp2p::{
 	core::{
-		connection::{ConnectionId, ListenerId},
-		either::EitherOutput,
-		ConnectedPoint, PeerId, PublicKey,
+		connection::ConnectionId, either::EitherOutput, transport::ListenerId, ConnectedPoint,
+		PeerId, PublicKey,
 	},
 	identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo},
 	ping::{Ping, PingConfig, PingEvent, PingSuccess},
diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs
index 387a7b3fdde90..351e7d207ad1e 100644
--- a/client/network/src/protocol.rs
+++ b/client/network/src/protocol.rs
@@ -18,19 +18,14 @@
 use crate::{
 	config, error,
-	request_responses::RequestFailure,
 	utils::{interval, LruHashSet},
-	warp_request_handler::{EncodedProof, WarpSyncProvider},
 };
 
 use bytes::Bytes;
 use codec::{Decode, DecodeAll, Encode};
 use futures::{channel::oneshot, prelude::*};
 use libp2p::{
-	core::{
-		connection::{ConnectionId, ListenerId},
-		ConnectedPoint,
-	},
+	core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint},
 	request_response::OutboundFailure,
 	swarm::{
 		ConnectionHandler, IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction,
@@ -45,21 +40,24 @@ use message::{
 };
 use notifications::{Notifications, NotificationsOut};
 use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
-use prost::Message as _;
 use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider};
 use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin};
-use sc_network_common::config::ProtocolId;
-use sc_network_sync::{
-	message::{
-		BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState,
-		FromBlock,
+use sc_network_common::{
+	config::ProtocolId,
+	request_responses::RequestFailure,
+	sync::{
+		message::{
+			BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState,
+		},
+		warp::{EncodedProof, WarpProofRequest},
+		BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest,
+		OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation,
+		SyncStatus,
 	},
-	schema::v1::StateResponse,
-	BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData,
-	PollBlockAnnounceValidation, Status as SyncStatus,
 };
 use sp_arithmetic::traits::SaturatedConversion;
-use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin};
+use sp_blockchain::HeaderMetadata;
+use sp_consensus::BlockOrigin;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero},
@@ -78,11 +76,9 @@ use std::{
 
 mod notifications;
 
-pub mod event;
 pub mod message;
 
 pub use notifications::{NotificationsSink, NotifsHandlerError, Ready};
-use sp_blockchain::HeaderMetadata;
 
 /// Interval at which we perform time based maintenance
 const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100);
@@ -170,17 +166,22 @@ pub struct Protocol<B: BlockT, Client> {
 	tick_timeout: Pin<Box<dyn Stream<Item = ()> + Send>>,
 	/// Pending list of messages to return from `poll` as a priority.
 	pending_messages: VecDeque<CustomMessageOutcome<B>>,
-	config: ProtocolConfig,
+	/// Assigned roles.
+	roles: Roles,
 	genesis_hash: B::Hash,
 	/// State machine that handles the list of in-progress requests. Only full node peers are
 	/// registered.
-	sync: ChainSync<B>,
+	chain_sync: Box<dyn ChainSync<B>>,
 	// All connected peers. Contains both full and light node peers.
 	peers: HashMap<PeerId, Peer<B>>,
 	chain: Arc<Client>,
 	/// List of nodes for which we perform additional logging because they are important for the
 	/// user.
 	important_peers: HashSet<PeerId>,
+	/// List of nodes that should never occupy peer slots.
+	default_peers_set_no_slot_peers: HashSet<PeerId>,
+	/// Actual list of connected no-slot nodes.
+	default_peers_set_no_slot_connected_peers: HashSet<PeerId>,
 	/// Value that was passed as part of the configuration. Used to cap the number of full nodes.
 	default_peers_set_num_full: usize,
 	/// Number of slots to allocate to light nodes.
@@ -234,38 +235,6 @@ pub struct PeerInfo<B: BlockT> {
 	pub best_number: <B::Header as HeaderT>::Number,
 }
 
-/// Configuration for the Substrate-specific part of the networking layer.
-#[derive(Clone)]
-pub struct ProtocolConfig {
-	/// Assigned roles.
-	pub roles: Roles,
-	/// Maximum number of peers to ask the same blocks in parallel.
-	pub max_parallel_downloads: u32,
-	/// Enable state sync.
-	pub sync_mode: config::SyncMode,
-}
-
-impl ProtocolConfig {
-	fn sync_mode(&self) -> sc_network_sync::SyncMode {
-		if self.roles.is_light() {
-			sc_network_sync::SyncMode::Light
-		} else {
-			match self.sync_mode {
-				config::SyncMode::Full => sc_network_sync::SyncMode::Full,
-				config::SyncMode::Fast { skip_proofs, storage_chain_mode } =>
-					sc_network_sync::SyncMode::LightState { skip_proofs, storage_chain_mode },
-				config::SyncMode::Warp => sc_network_sync::SyncMode::Warp,
-			}
-		}
-	}
-}
-
-impl Default for ProtocolConfig {
-	fn default() -> ProtocolConfig {
-		Self { roles: Roles::FULL, max_parallel_downloads: 5, sync_mode: config::SyncMode::Full }
-	}
-}
-
 /// Handshake sent when we open a block announces substream.
 #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
 struct BlockAnnouncesHandshake<B: BlockT> {
@@ -281,12 +250,12 @@ impl<B: BlockT> BlockAnnouncesHandshake<B> {
 	fn build(
-		protocol_config: &ProtocolConfig,
+		roles: Roles,
 		best_number: NumberFor<B>,
 		best_hash: B::Hash,
 		genesis_hash: B::Hash,
 	) -> Self {
-		Self { genesis_hash, roles: protocol_config.roles, best_number, best_hash }
+		Self { genesis_hash, roles, best_number, best_hash }
 	}
 }
 
@@ -303,24 +272,16 @@ where
 {
 	/// Create a new instance.
pub fn new( - config: ProtocolConfig, + roles: Roles, chain: Arc, protocol_id: ProtocolId, + fork_id: &Option, network_config: &config::NetworkConfiguration, notifications_protocols_handshakes: Vec>, - block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, - warp_sync_provider: Option>>, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { + chain_sync: Box>, + ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); - let sync = ChainSync::new( - config.sync_mode(), - chain.clone(), - block_announce_validator, - config.max_parallel_downloads, - warp_sync_provider, - ) - .map_err(Box::new)?; let boot_node_ids = { let mut list = HashSet::new(); @@ -347,6 +308,17 @@ where imp_p }; + let default_peers_set_no_slot_peers = { + let mut no_slot_p: HashSet = network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|reserved| reserved.peer_id) + .collect(); + no_slot_p.shrink_to_fit(); + no_slot_p + }; + let mut known_addresses = Vec::new(); let (peerset, peerset_handle) = { @@ -399,8 +371,17 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; - let block_announces_protocol: Cow<'static, str> = - format!("/{}/block-announces/1", protocol_id.as_ref()).into(); + let block_announces_protocol = { + let genesis_hash = + chain.block_hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); + if let Some(fork_id) = fork_id { + format!("/{}/{}/block-announces/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/block-announces/1", hex::encode(genesis_hash)) + } + }; + + let legacy_ba_protocol_name = format!("/{}/block-announces/1", protocol_id.as_ref()); let behaviour = { let best_number = info.best_number; @@ -408,12 +389,12 @@ where let genesis_hash = info.genesis_hash; let block_announces_handshake = - BlockAnnouncesHandshake::::build(&config, best_number, best_hash, genesis_hash) + BlockAnnouncesHandshake::::build(roles, best_number, best_hash, genesis_hash) .encode(); let sync_protocol_config = notifications::ProtocolConfig { - name: block_announces_protocol, - fallback_names: Vec::new(), + name: block_announces_protocol.into(), + fallback_names: iter::once(legacy_ba_protocol_name.into()).collect(), handshake: block_announces_handshake, max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, }; @@ -441,12 +422,14 @@ where let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), pending_messages: VecDeque::new(), - config, + roles, peers: HashMap::new(), chain, genesis_hash: info.genesis_hash, - sync, + chain_sync, important_peers, + default_peers_set_no_slot_peers, + default_peers_set_no_slot_connected_peers: HashSet::new(), default_peers_set_num_full: network_config.default_peers_set_num_full as usize, default_peers_set_num_light: { let total = network_config.default_peers_set.out_peers + @@ -513,49 +496,49 @@ where /// Current global sync state. pub fn sync_state(&self) -> SyncStatus { - self.sync.status() + self.chain_sync.status() } /// Target sync block number. pub fn best_seen_block(&self) -> Option> { - self.sync.status().best_seen_block + self.chain_sync.status().best_seen_block } /// Number of peers participating in syncing. pub fn num_sync_peers(&self) -> u32 { - self.sync.status().num_peers + self.chain_sync.status().num_peers } /// Number of blocks in the import queue. 
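// A minimal sketch (hypothetical genesis hash, not part of the diff) of the naming
// scheme introduced in `Protocol::new` above: the block-announces protocol is now
// named after the genesis hash, optionally scoped by a fork id, with the legacy
// `ProtocolId`-based name kept only as a fallback for older peers:
//
//   /91b171bb.../block-announces/1           (fork_id = None)
//   /91b171bb.../staging/block-announces/1   (fork_id = Some("staging"))
//   /dot/block-announces/1                   (legacy fallback name)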
pub fn num_queued_blocks(&self) -> u32 { - self.sync.status().queued_blocks + self.chain_sync.status().queued_blocks } /// Number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - self.sync.num_downloaded_blocks() + self.chain_sync.num_downloaded_blocks() } /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { - self.sync.num_sync_requests() + self.chain_sync.num_sync_requests() } /// Inform sync about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); - self.sync.update_chain_info(&hash, number); + self.chain_sync.update_chain_info(&hash, number); self.behaviour.set_notif_protocol_handshake( HARDCODED_PEERSETS_SYNC, - BlockAnnouncesHandshake::::build(&self.config, number, hash, self.genesis_hash) + BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) .encode(), ); } fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.sync.peer_info(who) { + if let Some(info) = self.chain_sync.peer_info(who) { if let Some(ref mut peer) = self.peers.get_mut(who) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; @@ -568,14 +551,6 @@ where self.peers.iter().map(|(id, peer)| (id, &peer.info)) } - fn prepare_block_request( - &mut self, - who: PeerId, - request: BlockRequest, - ) -> CustomMessageOutcome { - prepare_block_request::(&mut self.peers, who, request) - } - /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. @@ -587,10 +562,13 @@ where } if let Some(_peer_data) = self.peers.remove(&peer) { - if let Some(OnBlockData::Import(origin, blocks)) = self.sync.peer_disconnected(&peer) { + if let Some(OnBlockData::Import(origin, blocks)) = + self.chain_sync.peer_disconnected(&peer) + { self.pending_messages .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); } + self.default_peers_set_no_slot_connected_peers.remove(&peer); Ok(()) } else { Err(()) @@ -608,62 +586,9 @@ where &mut self, peer_id: PeerId, request: BlockRequest, - response: sc_network_sync::schema::v1::BlockResponse, + response: OpaqueBlockResponse, ) -> CustomMessageOutcome { - let blocks = response - .blocks - .into_iter() - .map(|block_data| { - Ok(BlockData:: { - hash: Decode::decode(&mut block_data.hash.as_ref())?, - header: if !block_data.header.is_empty() { - Some(Decode::decode(&mut block_data.header.as_ref())?) - } else { - None - }, - body: if request.fields.contains(BlockAttributes::BODY) { - Some( - block_data - .body - .iter() - .map(|body| Decode::decode(&mut body.as_ref())) - .collect::, _>>()?, - ) - } else { - None - }, - indexed_body: if request.fields.contains(BlockAttributes::INDEXED_BODY) { - Some(block_data.indexed_body) - } else { - None - }, - receipt: if !block_data.receipt.is_empty() { - Some(block_data.receipt) - } else { - None - }, - message_queue: if !block_data.message_queue.is_empty() { - Some(block_data.message_queue) - } else { - None - }, - justification: if !block_data.justification.is_empty() { - Some(block_data.justification) - } else if block_data.is_empty_justification { - Some(Vec::new()) - } else { - None - }, - justifications: if !block_data.justifications.is_empty() { - Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) 
- } else { - None - }, - }) - }) - .collect::, codec::Error>>(); - - let blocks = match blocks { + let blocks = match self.chain_sync.block_response_into_blocks(&request, response) { Ok(blocks) => blocks, Err(err) => { debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); @@ -693,7 +618,7 @@ where ); if request.fields == BlockAttributes::JUSTIFICATION { - match self.sync.on_block_justification(peer_id, block_response) { + match self.chain_sync.on_block_justification(peer_id, block_response) { Ok(OnBlockJustification::Nothing) => CustomMessageOutcome::None, Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => CustomMessageOutcome::JustificationImport(peer, hash, number, justifications), @@ -704,10 +629,11 @@ where }, } } else { - match self.sync.on_block_data(&peer_id, Some(request), block_response) { + match self.chain_sync.on_block_data(&peer_id, Some(request), block_response) { Ok(OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), + Ok(OnBlockData::Request(peer, req)) => + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -722,9 +648,9 @@ where pub fn on_state_response( &mut self, peer_id: PeerId, - response: StateResponse, + response: OpaqueStateResponse, ) -> CustomMessageOutcome { - match self.sync.on_state_data(&peer_id, response) { + match self.chain_sync.on_state_data(&peer_id, response) { Ok(OnStateData::Import(origin, block)) => CustomMessageOutcome::BlockImport(origin, vec![block]), Ok(OnStateData::Continue) => CustomMessageOutcome::None, @@ -741,9 +667,9 @@ where pub fn on_warp_sync_response( &mut self, peer_id: PeerId, - response: crate::warp_request_handler::EncodedProof, + response: EncodedProof, ) -> CustomMessageOutcome { - match self.sync.on_warp_sync_data(&peer_id, response) { + match self.chain_sync.on_warp_sync_data(&peer_id, response) { Ok(()) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -801,7 +727,7 @@ where return Err(()) } - if self.config.roles.is_light() { + if self.roles.is_light() { // we're not interested in light peers if status.roles.is_light() { debug!(target: "sync", "Peer {} is unable to serve light requests", who); @@ -824,14 +750,22 @@ where } } - if status.roles.is_full() && self.sync.num_peers() >= self.default_peers_set_num_full { + let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); + let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + + if status.roles.is_full() && + self.chain_sync.num_peers() >= + self.default_peers_set_num_full + + self.default_peers_set_no_slot_connected_peers.len() + + this_peer_reserved_slot + { debug!(target: "sync", "Too many full nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); return Err(()) } if status.roles.is_light() && - (self.peers.len() - self.sync.num_peers()) >= self.default_peers_set_num_light + (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. 
debug!(target: "sync", "Too many light nodes, rejecting {}", who); @@ -852,7 +786,7 @@ where }; let req = if peer.info.roles.is_full() { - match self.sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { Ok(req) => req, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -867,12 +801,19 @@ where debug!(target: "sync", "Connected {}", who); self.peers.insert(who, peer); + if no_slot_peer { + self.default_peers_set_no_slot_connected_peers.insert(who); + } self.pending_messages .push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number)); if let Some(req) = req { - let event = self.prepare_block_request(who, req); - self.pending_messages.push_back(event); + self.pending_messages.push_back(prepare_block_request( + self.chain_sync.as_ref(), + &mut self.peers, + who, + req, + )); } Ok(()) @@ -956,7 +897,7 @@ where }; if peer.info.roles.is_full() { - self.sync.push_block_announce_validation(who, hash, announce, is_best); + self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); } } @@ -1013,7 +954,7 @@ where // to import header from announced block let's construct response to request that normally // would have been sent over network (but it is not in our case) - let blocks_to_import = self.sync.on_block_data( + let blocks_to_import = self.chain_sync.on_block_data( &who, None, BlockResponse:: { @@ -1038,7 +979,8 @@ where match blocks_to_import { Ok(OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), + Ok(OnBlockData::Request(peer, req)) => + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -1050,7 +992,7 @@ where /// Call this when a block has been finalized. The sync layer may have some additional /// requesting to perform. pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.sync.on_block_finalized(&hash, *header.number()) + self.chain_sync.on_block_finalized(&hash, *header.number()) } /// Request a justification for the given block. @@ -1058,12 +1000,12 @@ where /// Uses `protocol` to queue a new justification request and tries to dispatch all pending /// requests. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_justification(hash, number) + self.chain_sync.request_justification(hash, number) } /// Clear all pending justification requests. pub fn clear_justification_requests(&mut self) { - self.sync.clear_justification_requests(); + self.chain_sync.clear_justification_requests(); } /// Request syncing for the given block from given set of peers. @@ -1075,7 +1017,7 @@ where hash: &B::Hash, number: NumberFor, ) { - self.sync.set_sync_fork_request(peers, hash, number) + self.chain_sync.set_sync_fork_request(peers, hash, number) } /// A batch of blocks have been processed, with or without errors. 
@@ -1087,11 +1029,12 @@ where count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - let results = self.sync.on_blocks_processed(imported, count, results); + let results = self.chain_sync.on_blocks_processed(imported, count, results); for result in results { match result { Ok((id, req)) => { self.pending_messages.push_back(prepare_block_request( + self.chain_sync.as_ref(), &mut self.peers, id, req, @@ -1114,7 +1057,7 @@ where number: NumberFor, success: bool, ) { - self.sync.on_justification_import(hash, number, success); + self.chain_sync.on_justification_import(hash, number, success); if !success { info!("💔 Invalid justification provided by {} for #{}", who, hash); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); @@ -1231,12 +1174,22 @@ where } } + /// Encode implementation-specific block request. + pub fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String> { + self.chain_sync.encode_block_request(request) + } + + /// Encode implementation-specific state request. + pub fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String> { + self.chain_sync.encode_state_request(request) + } + fn report_metrics(&self) { if let Some(metrics) = &self.metrics { let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); metrics.peers.set(n); - let m = self.sync.metrics(); + let m = self.chain_sync.metrics(); metrics.fork_targets.set(m.fork_targets.into()); metrics.queued_blocks.set(m.queued_blocks.into()); @@ -1262,6 +1215,7 @@ where } fn prepare_block_request( + chain_sync: &dyn ChainSync, peers: &mut HashMap>, who: PeerId, request: BlockRequest, @@ -1272,19 +1226,7 @@ fn prepare_block_request( peer.request = Some((PeerRequest::Block(request.clone()), rx)); } - let request = sc_network_sync::schema::v1::BlockRequest { - fields: request.fields.to_be_u32(), - from_block: match request.from { - FromBlock::Hash(h) => - Some(sc_network_sync::schema::v1::block_request::FromBlock::Hash(h.encode())), - FromBlock::Number(n) => - Some(sc_network_sync::schema::v1::block_request::FromBlock::Number(n.encode())), - }, - to_block: request.to.map(|h| h.encode()).unwrap_or_default(), - direction: request.direction as i32, - max_blocks: request.max.unwrap_or(0), - support_multiple_justifications: true, - }; + let request = chain_sync.create_opaque_block_request(&request); CustomMessageOutcome::BlockRequest { target: who, request, pending_response: tx } } @@ -1292,7 +1234,7 @@ fn prepare_block_request( fn prepare_state_request( peers: &mut HashMap>, who: PeerId, - request: sc_network_sync::schema::v1::StateRequest, + request: OpaqueStateRequest, ) -> CustomMessageOutcome { let (tx, rx) = oneshot::channel(); @@ -1305,7 +1247,7 @@ fn prepare_state_request( fn prepare_warp_sync_request( peers: &mut HashMap>, who: PeerId, - request: crate::warp_request_handler::Request, + request: WarpProofRequest, ) -> CustomMessageOutcome { let (tx, rx) = oneshot::channel(); @@ -1349,19 +1291,19 @@ pub enum CustomMessageOutcome { /// A new block request must be emitted. BlockRequest { target: PeerId, - request: sc_network_sync::schema::v1::BlockRequest, + request: OpaqueBlockRequest, pending_response: oneshot::Sender, RequestFailure>>, }, /// A new storage request must be emitted. StateRequest { target: PeerId, - request: sc_network_sync::schema::v1::StateRequest, + request: OpaqueStateRequest, pending_response: oneshot::Sender, RequestFailure>>, }, /// A new warp sync request must be emitted. 
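// How the opaque request types above fit together (method names taken from this
// diff; treat the snippet as a sketch rather than exact signatures):
//
//   let opaque = chain_sync.create_opaque_block_request(&request);
//   let bytes = protocol.encode_block_request(&opaque)?;        // Vec<u8> for the wire
//   let response = chain_sync.decode_block_response(&raw[..])?; // OpaqueBlockResponse
//   let blocks = chain_sync.block_response_into_blocks(&request, response)?;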
WarpSyncRequest { target: PeerId, - request: crate::warp_request_handler::Request, + request: WarpProofRequest, pending_response: oneshot::Sender, RequestFailure>>, }, /// Peer has reported a new head of chain. @@ -1458,10 +1400,8 @@ where let (req, _) = peer.request.take().unwrap(); match req { PeerRequest::Block(req) => { - let protobuf_response = - match sc_network_sync::schema::v1::BlockResponse::decode( - &resp[..], - ) { + let response = + match self.chain_sync.decode_block_response(&resp[..]) { Ok(proto) => proto, Err(e) => { debug!( @@ -1477,13 +1417,11 @@ where }, }; - finished_block_requests.push((*id, req, protobuf_response)); + finished_block_requests.push((*id, req, response)); }, PeerRequest::State => { - let protobuf_response = - match sc_network_sync::schema::v1::StateResponse::decode( - &resp[..], - ) { + let response = + match self.chain_sync.decode_state_response(&resp[..]) { Ok(proto) => proto, Err(e) => { debug!( @@ -1499,7 +1437,7 @@ where }, }; - finished_state_requests.push((*id, protobuf_response)); + finished_state_requests.push((*id, response)); }, PeerRequest::WarpProof => { finished_warp_sync_requests.push((*id, resp)); @@ -1558,12 +1496,12 @@ where } } } - for (id, req, protobuf_response) in finished_block_requests { - let ev = self.on_block_response(id, req, protobuf_response); + for (id, req, response) in finished_block_requests { + let ev = self.on_block_response(id, req, response); self.pending_messages.push_back(ev); } - for (id, protobuf_response) in finished_state_requests { - let ev = self.on_state_response(id, protobuf_response); + for (id, response) in finished_state_requests { + let ev = self.on_state_response(id, response); self.pending_messages.push_back(ev); } for (id, response) in finished_warp_sync_requests { @@ -1575,25 +1513,32 @@ where self.tick(); } - for (id, request) in self.sync.block_requests() { - let event = prepare_block_request(&mut self.peers, *id, request); + for (id, request) in self + .chain_sync + .block_requests() + .map(|(peer_id, request)| (*peer_id, request)) + .collect::>() + { + let event = + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, id, request); self.pending_messages.push_back(event); } - if let Some((id, request)) = self.sync.state_request() { + if let Some((id, request)) = self.chain_sync.state_request() { let event = prepare_state_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } - for (id, request) in self.sync.justification_requests() { - let event = prepare_block_request(&mut self.peers, id, request); + for (id, request) in self.chain_sync.justification_requests().collect::>() { + let event = + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, id, request); self.pending_messages.push_back(event); } - if let Some((id, request)) = self.sync.warp_sync_request() { + if let Some((id, request)) = self.chain_sync.warp_sync_request() { let event = prepare_warp_sync_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } // Check if any block announcement validation has finished. - while let Poll::Ready(result) = self.sync.poll_block_announce_validation(cx) { + while let Poll::Ready(result) = self.chain_sync.poll_block_announce_validation(cx) { match self.process_block_announce_validation_result(result) { CustomMessageOutcome::None => {}, outcome => self.pending_messages.push_back(outcome), @@ -1631,8 +1576,6 @@ where } => { // Set number 0 is hardcoded as the default set of peers we sync from.
if set_id == HARDCODED_PEERSETS_SYNC { - debug_assert!(negotiated_fallback.is_none()); - - // `received_handshake` can be either a `Status` message if received from the // legacy substream, or a `BlockAnnouncesHandshake` if received from the block // announces substream. @@ -1774,7 +1717,8 @@ where // Make sure that the newly added block announce validation future was // polled once to be registered in the task. - if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { + if let Poll::Ready(res) = self.chain_sync.poll_block_announce_validation(cx) + { self.process_block_announce_validation_result(res) } else { CustomMessageOutcome::None diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index a57740ec2746b..50c4a264a5f95 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -63,10 +63,12 @@ pub mod generic { use bitflags::bitflags; use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; - use sc_network_common::message::RequestId; - use sc_network_sync::message::{ - generic::{BlockRequest, BlockResponse}, - BlockAnnounce, + use sc_network_common::{ + message::RequestId, + sync::message::{ + generic::{BlockRequest, BlockResponse}, + BlockAnnounce, + }, }; use sp_runtime::ConsensusEngineId; @@ -105,7 +107,6 @@ pub mod generic { fn from(roles: &'a crate::config::Role) -> Self { match roles { crate::config::Role::Full => Self::FULL, - crate::config::Role::Light => Self::LIGHT, crate::config::Role::Authority { .. } => Self::AUTHORITY, } } } diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index da12dbf216c4c..fa79366d20283 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -23,8 +23,8 @@ use crate::protocol::notifications::{Notifications, NotificationsOut, ProtocolCo use futures::prelude::*; use libp2p::{ core::{ - connection::{ConnectionId, ListenerId}, - transport::MemoryTransport, + connection::ConnectionId, + transport::{ListenerId, MemoryTransport}, upgrade, ConnectedPoint, }, identity, noise, @@ -56,7 +56,7 @@ fn build_nodes() -> (Swarm, Swarm) { let noise_keys = noise::Keypair::::new().into_authentic(&keypair).unwrap(); - let transport = MemoryTransport + let transport = MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) .multiplex(yamux::YamuxConfig::default()) diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 6c7b0f3fcdfc8..9eab85a4c1ce1 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -40,10 +40,7 @@ use futures::{ prelude::*, }; use libp2p::{ - core::{ - connection::{ConnectionId, ListenerId}, - ConnectedPoint, Multiaddr, PeerId, - }, + core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint, Multiaddr, PeerId}, request_response::{ handler::RequestResponseHandler, ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent, RequestResponseMessage, ResponseChannel, @@ -53,7 +50,9 @@ use libp2p::{ NetworkBehaviourAction, PollParameters, }, }; -use sc_network_common::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use sc_network_common::request_responses::{ + IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, +}; use std::{ borrow::Cow,
collections::{hash_map::Entry, HashMap}, @@ -121,26 +120,6 @@ impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { } } -/// When sending a request, what to do on a disconnected recipient. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum IfDisconnected { - /// Try to connect to the peer. - TryConnect, - /// Just fail if the destination is not yet connected. - ImmediateError, -} - -/// Convenience functions for `IfDisconnected`. -impl IfDisconnected { - /// Shall we connect to a disconnected peer? - pub fn should_connect(self) -> bool { - match self { - Self::TryConnect => true, - Self::ImmediateError => false, - } - } -} - /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. pub struct RequestResponsesBehaviour { /// The multiple sub-protocols, by name. @@ -223,7 +202,9 @@ impl RequestResponsesBehaviour { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), + iter::once(protocol.name.as_bytes().to_vec()) + .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) + .zip(iter::repeat(protocol_support)), cfg, ); @@ -788,23 +769,6 @@ pub enum RegisterError { DuplicateProtocol(Cow<'static, str>), } -/// Error in a request. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RequestFailure { - #[error("We are not currently connected to the requested peer.")] - NotConnected, - #[error("Given protocol hasn't been registered.")] - UnknownProtocol, - #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] - Refused, - #[error("The remote replied, but the local node is no longer interested in the response.")] - Obsolete, - /// Problem on the network. - #[error("Problem on the network: {0}")] - Network(OutboundFailure), -} - /// Error when processing a request sent by a remote. 
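// A small illustration (hypothetical names) of the `fallback_names` change above:
// a single request-response protocol is now registered under its main name plus
// every fallback, all sharing one `ProtocolSupport`, e.g.
//
//   name: "/91b171bb.../sync/2", fallback_names: ["/dot/sync/2"]
//
// so peers that only know the legacy `ProtocolId`-based name still reach the handler.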
#[derive(Debug, thiserror::Error)] pub enum ResponseFailure { @@ -968,7 +932,7 @@ mod tests { let noise_keys = noise::Keypair::::new().into_authentic(&keypair).unwrap(); - let transport = MemoryTransport + let transport = MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) .multiplex(libp2p::yamux::YamuxConfig::default()) @@ -1030,6 +994,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1130,6 +1095,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 8, // <-- important for the test request_timeout: Duration::from_secs(30), @@ -1226,6 +1192,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1233,6 +1200,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1250,6 +1218,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1257,6 +1226,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1cc717a50d039..68ac4b8db8a7d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,23 +30,24 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, bitswap::Bitswap, - config::{parse_str_addr, Params, TransportConfig}, + config::{Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, protocol::{ - self, event::Event, message::generic::Roles, NotificationsSink, NotifsHandlerError, - PeerInfo, Protocol, Ready, + self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, + Ready, }, - transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, + transactions, transport, ExHashT, ReputationChange, }; use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, + kad::record::Key as KademliaKey, multiaddr, ping::Failure as PingFailure, swarm::{ @@ -60,7 +61,18 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_sync::{Status as SyncStatus, SyncState}; +use sc_network_common::{ + config::MultiaddrWithPeerId, + protocol::event::{DhtEvent, Event}, + request_responses::{IfDisconnected, RequestFailure}, + service::{ + NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, 
NetworkSyncForkRequest, + NotificationSender as NotificationSenderT, NotificationSenderError, + NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, + }, + sync::{SyncState, SyncStatus}, +}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -81,24 +93,15 @@ use std::{ task::Poll, }; -pub use behaviour::{ - IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, ResponseFailure, -}; +pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; mod metrics; mod out_events; -mod signature; #[cfg(test)] mod tests; -pub use libp2p::{ - identity::{ - error::{DecodingError, SigningError}, - Keypair, PublicKey, - }, - kad::record::Key as KademliaKey, -}; -pub use signature::Signature; +pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; +use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { @@ -148,6 +151,36 @@ where /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. pub fn new(mut params: Params) -> Result { + // Private and public keys configuration. + let local_identity = params.network_config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.to_peer_id(); + + params.network_config.boot_nodes = params + .network_config + .boot_nodes + .into_iter() + .filter(|boot_node| boot_node.peer_id != local_peer_id) + .collect(); + params.network_config.default_peers_set.reserved_nodes = params + .network_config + .default_peers_set + .reserved_nodes + .into_iter() + .filter(|reserved_node| { + if reserved_node.peer_id == local_peer_id { + warn!( + target: "sub-libp2p", + "Local peer ID used in reserved node, ignoring: {}", + reserved_node, + ); + false + } else { + true + } + }) + .collect(); + // Ensure the listen addresses are consistent with the transport. ensure_addresses_consistent_with_transport( params.network_config.listen_addresses.iter(), @@ -183,17 +216,21 @@ where fs::create_dir_all(path)?; } - let transactions_handler_proto = - transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone()); + let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( + params.protocol_id.clone(), + params + .chain + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + params.fork_id.clone(), + ); params .network_config .extra_sets .insert(0, transactions_handler_proto.set_config()); - // Private and public keys configuration. 
- let local_identity = params.network_config.node_key.clone().into_keypair()?; - let local_public = local_identity.public(); - let local_peer_id = local_public.to_peer_id(); info!( target: "sub-libp2p", "🏷 Local node identity is: {}", @@ -202,19 +239,11 @@ where let default_notif_handshake_message = Roles::from(¶ms.role).encode(); - let (warp_sync_provider, warp_sync_protocol_config) = match params.warp_sync { - Some((p, c)) => (Some(p), Some(c)), - None => (None, None), - }; - let (protocol, peerset_handle, mut known_addresses) = Protocol::new( - protocol::ProtocolConfig { - roles: From::from(¶ms.role), - max_parallel_downloads: params.network_config.max_parallel_downloads, - sync_mode: params.network_config.sync_mode.clone(), - }, + From::from(¶ms.role), params.chain.clone(), params.protocol_id.clone(), + ¶ms.fork_id, ¶ms.network_config, iter::once(Vec::new()) .chain( @@ -222,9 +251,8 @@ where .map(|_| default_notif_handshake_message.clone()), ) .collect(), - params.block_announce_validator, params.metrics_registry.as_ref(), - warp_sync_provider, + params.chain_sync, )?; // List of multiaddresses that we know in the network. @@ -261,7 +289,6 @@ where let is_major_syncing = Arc::new(AtomicBool::new(false)); // Build the swarm. - let client = params.chain.clone(); let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = format!( "{} ({})", @@ -347,7 +374,7 @@ where }; let behaviour = { - let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(client)); + let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(params.chain)); let result = Behaviour::new( protocol, user_agent, @@ -355,7 +382,7 @@ where discovery_config, params.block_request_protocol_config, params.state_request_protocol_config, - warp_sync_protocol_config, + params.warp_sync_protocol_config, bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, @@ -443,7 +470,6 @@ where let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( service.clone(), - params.role, params.transaction_pool, params.metrics_registry.as_ref(), )?; @@ -676,9 +702,8 @@ where self.service.remove_reserved_peer(peer); } - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + /// Adds a `PeerId` and its `Multiaddr` as reserved. + pub fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { self.service.add_reserved_peer(peer) } @@ -689,289 +714,6 @@ where } impl NetworkService { - /// Returns the local `PeerId`. - pub fn local_peer_id(&self) -> &PeerId { - &self.local_peer_id - } - - /// Signs the message with the `KeyPair` that defined the local `PeerId`. - pub fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> Result { - Signature::sign_message(msg.as_ref(), &self.local_identity) - } - - /// Set authorized peers. - /// - /// Need a better solution to manage authorized peers, but now just use reserved peers for - /// prototyping. - pub fn set_authorized_peers(&self, peers: HashSet) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); - } - - /// Set authorized_only flag. - /// - /// Need a better solution to decide authorized_only, but now just use reserved_only flag for - /// prototyping. 
- pub fn set_authorized_only(&self, reserved_only: bool) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); - } - - /// Adds an address known to a node. - pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - } - - /// Appends a notification to the buffer of pending outgoing notifications with the given peer. - /// Has no effect if the notifications channel with this protocol name is not open. - /// - /// If the buffer of pending outgoing notifications with that peer is full, the notification - /// is silently dropped and the connection to the remote will start being shut down. This - /// happens if you call this method at a higher rate than the rate at which the peer processes - /// these notifications, or if the available network bandwidth is too low. - /// - /// For this reason, this method is considered soft-deprecated. You are encouraged to use - /// [`NetworkService::notification_sender`] instead. - /// - /// > **Note**: The reason why this is a no-op in the situation where we have no channel is - /// > that we don't guarantee message delivery anyway. Networking issues can cause - /// > connections to drop at any time, and higher-level logic shouldn't differentiate - /// > between the remote voluntarily closing a substream or a network error - /// > preventing the message from being delivered. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - pub fn write_notification( - &self, - target: PeerId, - protocol: Cow<'static, str>, - message: Vec, - ) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } - - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); - } - - /// Obtains a [`NotificationSender`] for a connected peer, if it exists. - /// - /// A `NotificationSender` is scoped to a particular connection to the peer that holds - /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two - /// steps: - /// - /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready - /// for another notification, yielding a [`NotificationSenderReady`] token. - /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation - /// can only fail if the underlying notification substream or connection has suddenly closed. 
- /// - /// An error is returned by [`NotificationSenderReady::send`] if there exists no open - /// notifications substream with that combination of peer and protocol, or if the remote - /// has asked to close the notifications substream. If that happens, it is guaranteed that an - /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by - /// [`NetworkService::event_stream`]. - /// - /// If the remote requests to close the notifications substream, all notifications successfully - /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the - /// substream actually gets closed, but attempting to enqueue more notifications will now - /// return an error. It is however possible for the entire connection to be abruptly closed, - /// in which case enqueued notifications will be lost. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - /// - /// # Usage - /// - /// This method returns a struct that allows waiting until there is space available in the - /// buffer of messages towards the given peer. If the peer processes notifications at a slower - /// rate than we send them, this buffer will quickly fill up. - /// - /// As such, you should never do something like this: - /// - /// ```ignore - /// // Do NOT do this - /// for peer in peers { - /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } - /// } - /// } - /// ``` - /// - /// Doing so would slow down all peers to the rate of the slowest one. A malicious or - /// malfunctioning peer could intentionally process notifications at a very slow rate. - /// - /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one - /// maintained by `sc-network`, and use `notification_sender` to progressively send out - /// elements from your buffer. If this additional buffer is full (which will happen at some - /// point if the peer is too slow to process notifications), appropriate measures can be taken, - /// such as removing non-critical notifications from the buffer or disconnecting the peer - /// using [`NetworkService::disconnect_peer`]. - /// - /// - /// Notifications Per-peer buffer - /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet - /// ^ (not covered by - /// | sc-network) - /// + - /// Notifications should be dropped - /// if buffer is full - /// - /// - /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. - pub fn notification_sender( - &self, - target: PeerId, - protocol: Cow<'static, str>, - ) -> Result { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); - - Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) - } - - /// Returns a stream containing the events that happen on the network. - /// - /// If this method is called multiple times, the events are duplicated. 
- /// - /// The stream never ends (unless the `NetworkWorker` gets shut down). - /// - /// The name passed is used to identify the channel in the Prometheus metrics. Note that the - /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having - /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory - pub fn event_stream(&self, name: &'static str) -> impl Stream { - let (tx, rx) = out_events::channel(name); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); - rx - } - - /// Sends a single targeted request to a specific peer. On success, returns the response of - /// the peer. - /// - /// Request-response protocols are a way to complement notifications protocols, but - /// notifications should remain the default ways of communicating information. For example, a - /// peer can announce something through a notification, after which the recipient can obtain - /// more information by performing a request. - /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way - /// you will get an error immediately for disconnected peers, instead of waiting for a - /// potentially very long connection attempt, which would suggest that something is wrong - /// anyway, as you are supposed to be connected because of the notification protocol. - /// - /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. - /// Such restrictions, if desired, need to be enforced at the call site(s). - /// - /// The protocol must have been registered through - /// [`NetworkConfiguration::request_response_protocols`]( - /// crate::config::NetworkConfiguration::request_response_protocols). - pub async fn request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - connect: IfDisconnected, - ) -> Result, RequestFailure> { - let (tx, rx) = oneshot::channel(); - - self.start_request(target, protocol, request, tx, connect); - - match rx.await { - Ok(v) => v, - // The channel can only be closed if the network worker no longer exists. If the - // network worker no longer exists, then all connections to `target` are necessarily - // closed, and we legitimately report this situation as a "ConnectionClosed". - Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), - } - } - - /// Variation of `request` which starts a request whose response is delivered on a provided - /// channel. - /// - /// Instead of blocking and waiting for a reply, this function returns immediately, sending - /// responses via the passed in sender. This alternative API exists to make it easier to - /// integrate with message passing APIs. - /// - /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a - /// closing connection. This is expected behaviour. With `request` you would get a - /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. - pub fn start_request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx, - connect, - }); - } - - /// High-level network status information. - /// - /// Returns an error if the `NetworkWorker` is no longer running. 
- pub async fn status(&self) -> Result, ()> { - let (tx, rx) = oneshot::channel(); - - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); - - match rx.await { - Ok(v) => v.map_err(|_| ()), - // The channel can only be closed if the network worker no longer exists. - Err(_) => Err(()), - } - } - /// Get network state. /// /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally @@ -992,74 +734,97 @@ impl NetworkService { } } - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - /// You must call when new transaction is imported by the transaction pool. + /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. + if peer == self.local_peer_id { + Err("Local peer ID in peer set.".to_string()) + } else { + Ok((peer, addr)) + } + }) + .collect::, String>>() } +} - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); +impl sp_consensus::SyncOracle for NetworkService { + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) } - /// Disconnect from a node as soon as possible. - /// - /// This triggers the same effects as if the connection had closed itself spontaneously. - /// - /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also - /// prevents the local node from re-establishing an outgoing substream to this peer until it - /// is added again. 
- pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); + fn is_offline(&self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 } +} +impl sc_consensus::JustificationSyncLink for NetworkService { /// Request a justification for the given block from the network. /// /// On success, the justification will be passed to the import queue that was provided at /// initialization as part of the configuration. - pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } - /// Clear all pending justification requests. - pub fn clear_justification_requests(&self) { + fn clear_justification_requests(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); } +} - /// Are we in the process of downloading the chain? - pub fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) +impl NetworkStateInfo for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() } + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId { + self.local_peer_id + } +} + +impl NetworkSigner for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + Signature::sign_message(msg.as_ref(), &self.local_identity) + } +} + +impl NetworkDHTProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ /// Start getting a value from the DHT. /// /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn get_value(&self, key: &KademliaKey) { + fn get_value(&self, key: &KademliaKey) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } @@ -1067,62 +832,103 @@ impl NetworkService { /// /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn put_value(&self, key: KademliaKey, value: Vec) { + fn put_value(&self, key: KademliaKey, value: Vec) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } +} + +impl NetworkSyncForkRequest> for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request.
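+ ///
+ /// A minimal usage sketch (hypothetical peer ids and block coordinates):
+ ///
+ /// ```ignore
+ /// // External code detected a stale fork; ask two peers to sync it.
+ /// network.set_sync_fork_request(vec![peer_a, peer_b], fork_hash, fork_number);
+ /// // Passing an empty peer set removes the pending request again.
+ /// network.set_sync_fork_request(Vec::new(), fork_hash, fork_number);
+ /// ```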
+ fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } +} + +#[async_trait::async_trait] +impl NetworkStatusProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + async fn status(&self) -> Result, ()> { + let (tx, rx) = oneshot::channel(); + + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } + } +} + +impl NetworkPeers for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn set_authorized_peers(&self, peers: HashSet) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + } + + fn set_authorized_only(&self, reserved_only: bool) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } - /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. - pub fn accept_unreserved_peers(&self) { + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); + } + + fn accept_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing - /// purposes. - pub fn deny_unreserved_peers(&self) { + fn deny_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. - /// - /// Returns an `Err` if the given string is not a valid multiaddress - /// or contains an invalid peer ID (which includes the local peer ID). - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { // Make sure the local peer ID is never added to the PSM. - if peer_id == self.local_peer_id { + if peer.peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer.peer_id)); Ok(()) } - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { + fn remove_reserved_peer(&self, peer_id: PeerId) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Sets the reserved set of a protocol to the given set of peers. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. 
It can also - /// consist of only `/p2p/`. - /// - /// The node will start establishing/accepting connections and substreams to/from peers in this - /// set, if it doesn't have any substream open with them yet. - /// - /// Note however, if a call to this function results in less peers on the reserved set, they - /// will not necessarily get disconnected (depending on available free slots in the peer set). - /// If you want to also disconnect those removed peers, you will have to call - /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit - /// this step if the peer set is in reserved only mode. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn set_reserved_peers( + fn set_reserved_peers( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1153,14 +959,7 @@ impl NetworkService { Ok(()) } - /// Add peers to a peer set. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_peers_to_reserved_set( + fn add_peers_to_reserved_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1186,8 +985,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - pub fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1195,26 +993,7 @@ impl NetworkService { } } - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); - } - - /// Add a peer to a set of peers. - /// - /// If the set has slots available, it will try to open a substream with this peer. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_to_peers_set( + fn add_to_peers_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1240,10 +1019,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - /// - /// If we currently have an open substream with this peer, it will soon be closed. - pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1251,90 +1027,158 @@ impl NetworkService { } } - /// Returns the number of peers we're connected to. - pub fn num_connected(&self) -> usize { + fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } +} - /// Inform the network service about new best imported block. 
-	/// Inform the network service about new best imported block.
-	pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor<B>) {
-		let _ = self
-			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number));
+impl<B, H> NetworkEventStream for NetworkService<B, H>
+where
+	B: BlockT + 'static,
+	H: ExHashT,
+{
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
+		let (tx, rx) = out_events::channel(name);
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx));
+		Box::pin(rx)
 	}
+}
 
-	/// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates.
-	///
-	/// Returns an `Err` if one of the given addresses is invalid or contains an
-	/// invalid peer ID (which includes the local peer ID).
-	fn split_multiaddr_and_peer_id(
-		&self,
-		peers: HashSet<Multiaddr>,
-	) -> Result<Vec<(PeerId, Multiaddr)>, String> {
-		peers
-			.into_iter()
-			.map(|mut addr| {
-				let peer = match addr.pop() {
-					Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key)
-						.map_err(|_| "Invalid PeerId format".to_string())?,
-					_ => return Err("Missing PeerId from address".to_string()),
-				};
+impl<B, H> NetworkNotification for NetworkService<B, H>
+where
+	B: BlockT + 'static,
+	H: ExHashT,
+{
+	fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec<u8>) {
+		// We clone the `NotificationsSink` in order to be able to unlock the network-wide
+		// `peers_notifications_sinks` mutex as soon as possible.
+		let sink = {
+			let peers_notifications_sinks = self.peers_notifications_sinks.lock();
+			if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) {
+				sink.clone()
+			} else {
+				// Notification silently discarded, as documented.
+				debug!(
+					target: "sub-libp2p",
+					"Attempted to send notification on missing or closed substream: {}, {:?}",
+					target, protocol,
+				);
+				return
+			}
+		};
 
-				// Make sure the local peer ID is never added to the PSM
-				// or added as a "known address", even if given.
-				if peer == self.local_peer_id {
-					Err("Local peer ID in peer set.".to_string())
-				} else {
-					Ok((peer, addr))
-				}
-			})
-			.collect::<Result<Vec<(PeerId, Multiaddr)>, String>>()
-	}
-}
+		if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() {
+			notifications_sizes_metric
+				.with_label_values(&["out", &protocol])
+				.observe(message.len() as f64);
+		}
 
-impl<B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for NetworkService<B, H> {
-	fn is_major_syncing(&mut self) -> bool {
-		Self::is_major_syncing(self)
+		// Sending is communicated to the `NotificationsSink`.
+		trace!(
+			target: "sub-libp2p",
+			"External API => Notification({:?}, {:?}, {} bytes)",
+			target, protocol, message.len()
+		);
+		trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target);
+		sink.send_sync_notification(message);
 	}
 
-	fn is_offline(&mut self) -> bool {
-		self.num_connected.load(Ordering::Relaxed) == 0
+	fn notification_sender(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+	) -> Result<Box<dyn NotificationSenderT>, NotificationSenderError> {
+		// We clone the `NotificationsSink` in order to be able to unlock the network-wide
+		// `peers_notifications_sinks` mutex as soon as possible.
+		let sink = {
+			let peers_notifications_sinks = self.peers_notifications_sinks.lock();
+			if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) {
+				sink.clone()
+			} else {
+				return Err(NotificationSenderError::Closed)
+			}
+		};
+
+		let notification_size_metric = self
+			.notifications_sizes_metric
+			.as_ref()
+			.map(|histogram| histogram.with_label_values(&["out", &protocol]));
+
+		Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric }))
 	}
 }
 
-impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService<B, H> {
-	fn is_major_syncing(&mut self) -> bool {
-		NetworkService::is_major_syncing(self)
+#[async_trait::async_trait]
+impl<B, H> NetworkRequest for NetworkService<B, H>
+where
+	B: BlockT + 'static,
+	H: ExHashT,
+{
+	async fn request(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		connect: IfDisconnected,
+	) -> Result<Vec<u8>, RequestFailure> {
+		let (tx, rx) = oneshot::channel();
+
+		self.start_request(target, protocol, request, tx, connect);
+
+		match rx.await {
+			Ok(v) => v,
+			// The channel can only be closed if the network worker no longer exists. If the
+			// network worker no longer exists, then all connections to `target` are necessarily
+			// closed, and we legitimately report this situation as a "ConnectionClosed".
+			Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)),
+		}
 	}
 
-	fn is_offline(&mut self) -> bool {
-		self.num_connected.load(Ordering::Relaxed) == 0
+	fn start_request(
+		&self,
+		target: PeerId,
+		protocol: Cow<'static, str>,
+		request: Vec<u8>,
+		tx: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
+		connect: IfDisconnected,
+	) {
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request {
+			target,
+			protocol: protocol.into(),
+			request,
+			pending_response: tx,
+			connect,
+		});
 	}
 }
 
-impl<B: BlockT + 'static, H: ExHashT> sc_consensus::JustificationSyncLink<B> for NetworkService<B, H> {
-	fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
-		Self::request_justification(self, hash, number);
+impl<B, H> NetworkTransaction<H> for NetworkService<B, H>
+where
+	B: BlockT + 'static,
+	H: ExHashT,
+{
+	fn trigger_repropagate(&self) {
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions);
 	}
 
-	fn clear_justification_requests(&self) {
-		Self::clear_justification_requests(self);
+	fn propagate_transaction(&self, hash: H) {
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash));
 	}
 }
 
-impl<B, H> NetworkStateInfo for NetworkService<B, H>
+impl<B, H> NetworkBlock<B::Hash, NumberFor<B>> for NetworkService<B, H>
 where
-	B: sp_runtime::traits::Block,
+	B: BlockT + 'static,
 	H: ExHashT,
 {
-	/// Returns the local external addresses.
-	fn external_addresses(&self) -> Vec<Multiaddr> {
-		self.external_addresses.lock().clone()
+	fn announce_block(&self, hash: B::Hash, data: Option<Vec<u8>>) {
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data));
 	}
 
-	/// Returns the local Peer ID.
-	fn local_peer_id(&self) -> PeerId {
-		self.local_peer_id
+	fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor<B>) {
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number));
 	}
 }
 
@@ -1351,26 +1195,27 @@ pub struct NotificationSender {
 	notification_size_metric: Option<Histogram>,
 }
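// ---------------------------------------------------------------------------
// Editor's note: not part of the diff. A sketch of issuing a request/response
// call through the new `NetworkRequest` trait; the protocol name, payload and
// wrapper function are illustrative.
async fn ping<N: NetworkRequest>(net: &N, peer: PeerId) -> Result<Vec<u8>, RequestFailure> {
	// `IfDisconnected::TryConnect` makes the worker dial the peer first if no
	// connection is currently open; the response (or failure) comes back via
	// the oneshot channel wired up in `start_request` above.
	net.request(peer, "/demo/ping/1".into(), b"ping".to_vec(), IfDisconnected::TryConnect)
		.await
}
// ---------------------------------------------------------------------------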
-impl NotificationSender {
-	/// Returns a future that resolves when the `NotificationSender` is ready to send a
-	/// notification.
-	pub async fn ready(&self) -> Result<NotificationSenderReady<'_>, NotificationSenderError> {
-		Ok(NotificationSenderReady {
+#[async_trait::async_trait]
+impl NotificationSenderT for NotificationSender {
+	async fn ready(
+		&self,
+	) -> Result<Box<dyn NotificationSenderReadyT + '_>, NotificationSenderError> {
+		Ok(Box::new(NotificationSenderReady {
 			ready: match self.sink.reserve_notification().await {
-				Ok(r) => r,
+				Ok(r) => Some(r),
 				Err(()) => return Err(NotificationSenderError::Closed),
 			},
 			peer_id: self.sink.peer_id(),
 			protocol_name: &self.protocol_name,
 			notification_size_metric: self.notification_size_metric.clone(),
-		})
+		}))
 	}
 }
 
 /// Reserved slot in the notifications buffer, ready to accept data.
 #[must_use]
 pub struct NotificationSenderReady<'a> {
-	ready: Ready<'a>,
+	ready: Option<Ready<'a>>,
 
 	/// Target of the notification.
 	peer_id: &'a PeerId,
@@ -1383,11 +1228,8 @@ pub struct NotificationSenderReady<'a> {
 	notification_size_metric: Option<Histogram>,
 }
 
-impl<'a> NotificationSenderReady<'a> {
-	/// Consumes this slots reservation and actually queues the notification.
-	pub fn send(self, notification: impl Into<Vec<u8>>) -> Result<(), NotificationSenderError> {
-		let notification = notification.into();
-
+impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> {
+	fn send(&mut self, notification: Vec<u8>) -> Result<(), NotificationSenderError> {
 		if let Some(notification_size_metric) = &self.notification_size_metric {
 			notification_size_metric.observe(notification.len() as f64);
 		}
@@ -1399,26 +1241,14 @@ impl<'a> NotificationSenderReady<'a> {
 		);
 		trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id);
 
-		self.ready.send(notification).map_err(|()| NotificationSenderError::Closed)
+		self.ready
+			.take()
+			.ok_or(NotificationSenderError::Closed)?
+			.send(notification)
+			.map_err(|()| NotificationSenderError::Closed)
 	}
 }
 
-/// Error returned by [`NetworkService::send_notification`].
-#[derive(Debug, thiserror::Error)]
-pub enum NotificationSenderError {
-	/// The notification receiver has been closed, usually because the underlying connection
-	/// closed.
-	///
-	/// Some of the notifications most recently sent may not have been received. However,
-	/// the peer may still be connected and a new `NotificationSender` for the same
-	/// protocol obtained from [`NetworkService::notification_sender`].
-	#[error("The notification receiver has been closed")]
-	Closed,
-	/// Protocol name hasn't been registered.
-	#[error("Protocol name hasn't been registered")]
-	BadProtocol,
-}
-
 /// Messages sent from the `NetworkService` to the `NetworkWorker`.
 ///
 /// Each entry corresponds to a method of `NetworkService`.
@@ -1979,12 +1809,16 @@ where
 			if this.boot_node_ids.contains(&peer_id) {
 				if let DialError::WrongPeerId { obtained, endpoint } = &error {
-					error!(
-						"💔 The bootnode you want to connect provided a different peer ID than the one you expect: `{}` with `{}`:`{:?}`.",
-						peer_id,
-						obtained,
-						endpoint,
-					);
+					if let ConnectedPoint::Dialer { address, role_override: _ } =
+						endpoint
+					{
+						warn!(
+							"💔 The bootnode you want to connect to at `{}` provided a different peer ID `{}` than the one you expect `{}`.",
+							address,
+							obtained,
+							peer_id,
+						);
+					}
 				}
 			}
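// ---------------------------------------------------------------------------
// Editor's note: not part of the diff. With the boxed trait objects above, the
// back-pressured send flow looks as follows; the wrapper function is
// illustrative. `ready()` waits for a slot in the peer's notification buffer,
// and `send` now takes `&mut self` plus a `Vec<u8>` instead of consuming the
// slot with `impl Into<Vec<u8>>`.
async fn send_one(
	net: &dyn NetworkNotification,
	target: PeerId,
) -> Result<(), NotificationSenderError> {
	let sender = net.notification_sender(target, "/demo/proto/1".into())?;
	let mut slot = sender.ready().await?;
	slot.send(b"hello".to_vec())
}
// ---------------------------------------------------------------------------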
diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs
index c95b46af4cefa..4144d7f19551e 100644
--- a/client/network/src/service/out_events.rs
+++ b/client/network/src/service/out_events.rs
@@ -31,11 +31,10 @@
 //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the
 //!   collection.
 
-use crate::Event;
-
 use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream};
 use parking_lot::Mutex;
 use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64};
+use sc_network_common::protocol::event::Event;
 use std::{
 	cell::RefCell,
 	fmt,
diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs
index 808546d67fc7c..e2fe58423abfe 100644
--- a/client/network/src/service/tests.rs
+++ b/client/network/src/service/tests.rs
@@ -16,15 +16,21 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-use crate::{
-	config, state_request_handler::StateRequestHandler, Event, NetworkService, NetworkWorker,
-};
+use crate::{config, NetworkService, NetworkWorker};
 use futures::prelude::*;
 use libp2p::PeerId;
-use sc_network_common::config::ProtocolId;
+use sc_network_common::{
+	config::{MultiaddrWithPeerId, ProtocolId},
+	protocol::event::Event,
+	service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo},
+};
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
-use sc_network_sync::block_request_handler::BlockRequestHandler;
+use sc_network_sync::{
+	block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler,
+	ChainSync,
+};
+use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
 use sp_runtime::traits::{Block as BlockT, Header as _};
 use std::{borrow::Cow, sync::Arc, time::Duration};
 use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _};
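// ---------------------------------------------------------------------------
// Editor's note: not part of the diff. The tests below consume events through
// the new `NetworkEventStream` trait; a minimal version of that pattern, with
// illustrative names (requires `futures::StreamExt`, imported above via the
// prelude):
async fn first_event<N: NetworkEventStream>(net: &N) -> Event {
	// `event_stream` registers an `out_events` channel with the worker; the
	// returned stream then yields every subsequent network event.
	let mut events = net.event_stream("demo");
	events.next().await.expect("worker is alive; qed")
}
// ---------------------------------------------------------------------------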
@@ -40,7 +46,7 @@ type TestNetworkService = NetworkService<
 /// > **Note**: We return the events stream in order to not possibly lose events between the
 /// > construction of the service and the moment the events stream is grabbed.
 fn build_test_full_node(
-	config: config::NetworkConfiguration,
+	network_config: config::NetworkConfiguration,
 ) -> (Arc<TestNetworkService>, impl Stream<Item = Event>) {
 	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
 
@@ -90,44 +96,60 @@ fn build_test_full_node(
 
 	let protocol_id = ProtocolId::from("/test-protocol-name");
 
+	let fork_id = Some(String::from("test-fork-id"));
+
 	let block_request_protocol_config = {
-		let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50);
+		let (handler, protocol_config) =
+			BlockRequestHandler::new(&protocol_id, None, client.clone(), 50);
 		async_std::task::spawn(handler.run().boxed());
 		protocol_config
 	};
 
 	let state_request_protocol_config = {
-		let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50);
+		let (handler, protocol_config) =
+			StateRequestHandler::new(&protocol_id, None, client.clone(), 50);
 		async_std::task::spawn(handler.run().boxed());
 		protocol_config
 	};
 
 	let light_client_request_protocol_config = {
 		let (handler, protocol_config) =
-			LightClientRequestHandler::new(&protocol_id, client.clone());
+			LightClientRequestHandler::new(&protocol_id, None, client.clone());
 		async_std::task::spawn(handler.run().boxed());
 		protocol_config
 	};
 
+	let chain_sync = ChainSync::new(
+		match network_config.sync_mode {
+			config::SyncMode::Full => sc_network_common::sync::SyncMode::Full,
+			config::SyncMode::Fast { skip_proofs, storage_chain_mode } =>
+				sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode },
+			config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp,
+		},
+		client.clone(),
+		Box::new(DefaultBlockAnnounceValidator),
+		network_config.max_parallel_downloads,
+		None,
+	)
+	.unwrap();
 	let worker = NetworkWorker::new(config::Params {
 		role: config::Role::Full,
 		executor: None,
 		transactions_handler_executor: Box::new(|task| {
 			async_std::task::spawn(task);
 		}),
-		network_config: config,
+		network_config,
 		chain: client.clone(),
-		transaction_pool: Arc::new(crate::config::EmptyTransactionPool),
+		transaction_pool: Arc::new(config::EmptyTransactionPool),
 		protocol_id,
+		fork_id,
 		import_queue,
-		block_announce_validator: Box::new(
-			sp_consensus::block_validation::DefaultBlockAnnounceValidator,
-		),
+		chain_sync: Box::new(chain_sync),
 		metrics_registry: None,
 		block_request_protocol_config,
 		state_request_protocol_config,
 		light_client_request_protocol_config,
-		warp_sync: None,
+		warp_sync_protocol_config: None,
 	})
 	.unwrap();
 
@@ -172,9 +194,9 @@ fn build_nodes_one_proto() -> (
 		fallback_names: Vec::new(),
 		max_notification_size: 1024 * 1024,
 		set_config: config::SetConfig {
-			reserved_nodes: vec![config::MultiaddrWithPeerId {
+			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
-				peer_id: node1.local_peer_id().clone(),
+				peer_id: node1.local_peer_id(),
 			}],
 			..Default::default()
 		},
@@ -196,18 +218,10 @@ fn notifications_state_consistent() {
 
 	// Write some initial notifications that shouldn't get through.
 	for _ in 0..(rand::random::<usize>() % 5) {
-		node1.write_notification(
-			node2.local_peer_id().clone(),
-			PROTOCOL_NAME,
-			b"hello world".to_vec(),
-		);
+		node1.write_notification(node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec());
 	}
 	for _ in 0..(rand::random::<usize>() % 5) {
-		node2.write_notification(
-			node1.local_peer_id().clone(),
-			PROTOCOL_NAME,
-			b"hello world".to_vec(),
-		);
+		node2.write_notification(node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec());
 	}
 
 	async_std::task::block_on(async move {
@@ -231,14 +245,14 @@ fn notifications_state_consistent() {
 			// test consists in ensuring that notifications get ignored if the stream isn't open.
 			if rand::random::<usize>() % 5 >= 3 {
 				node1.write_notification(
-					node2.local_peer_id().clone(),
+					node2.local_peer_id(),
 					PROTOCOL_NAME,
 					b"hello world".to_vec(),
 				);
 			}
 			if rand::random::<usize>() % 5 >= 3 {
 				node2.write_notification(
-					node1.local_peer_id().clone(),
+					node1.local_peer_id(),
 					PROTOCOL_NAME,
 					b"hello world".to_vec(),
 				);
 			}
 
 			// Also randomly disconnect the two nodes from time to time.
 			if rand::random::<usize>() % 20 == 0 {
-				node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME);
+				node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME);
 			}
 			if rand::random::<usize>() % 20 == 0 {
-				node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME);
+				node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME);
 			}
 
 			// Grab next event from either `events_stream1` or `events_stream2`.
@@ -277,7 +291,7 @@ fn notifications_state_consistent() {
 					something_happened = true;
 					assert!(!node1_to_node2_open);
 					node1_to_node2_open = true;
-					assert_eq!(remote, *node2.local_peer_id());
+					assert_eq!(remote, node2.local_peer_id());
 				},
 				future::Either::Right(Event::NotificationStreamOpened {
 					remote, protocol, ..
@@ -286,7 +300,7 @@ fn notifications_state_consistent() {
 					something_happened = true;
 					assert!(!node2_to_node1_open);
 					node2_to_node1_open = true;
-					assert_eq!(remote, *node1.local_peer_id());
+					assert_eq!(remote, node1.local_peer_id());
 				},
 				future::Either::Left(Event::NotificationStreamClosed {
 					remote, protocol, ..
@@ -294,7 +308,7 @@ fn notifications_state_consistent() {
 					if protocol == PROTOCOL_NAME {
 						assert!(node1_to_node2_open);
 						node1_to_node2_open = false;
-						assert_eq!(remote, *node2.local_peer_id());
+						assert_eq!(remote, node2.local_peer_id());
 				},
 				future::Either::Right(Event::NotificationStreamClosed {
 					remote, protocol, ..
@@ -302,14 +316,14 @@ fn notifications_state_consistent() {
 					if protocol == PROTOCOL_NAME {
 						assert!(node2_to_node1_open);
 						node2_to_node1_open = false;
-						assert_eq!(remote, *node1.local_peer_id());
+						assert_eq!(remote, node1.local_peer_id());
 				},
 				future::Either::Left(Event::NotificationsReceived { remote, .. }) => {
 					assert!(node1_to_node2_open);
-					assert_eq!(remote, *node2.local_peer_id());
+					assert_eq!(remote, node2.local_peer_id());
 					if rand::random::<usize>() % 5 >= 4 {
 						node1.write_notification(
-							node2.local_peer_id().clone(),
+							node2.local_peer_id(),
 							PROTOCOL_NAME,
 							b"hello world".to_vec(),
 						);
@@ -317,10 +331,10 @@
 					}
 				},
 				future::Either::Right(Event::NotificationsReceived {
 					remote, ..
}) => { assert!(node2_to_node1_open); - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); if rand::random::() % 5 >= 4 { node2.write_notification( - node1.local_peer_id().clone(), + node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -355,7 +369,7 @@ fn lots_of_incoming_peers_works() { ..config::NetworkConfiguration::new_local() }); - let main_node_peer_id = *main_node.local_peer_id(); + let main_node_peer_id = main_node.local_peer_id(); // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. @@ -369,7 +383,7 @@ fn lots_of_incoming_peers_works() { fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { + reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), peer_id: main_node_peer_id, }], @@ -458,8 +472,13 @@ fn notifications_back_pressure() { // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); - notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); + let notif = node1.notification_sender(node2_id, PROTOCOL_NAME).unwrap(); + notif + .ready() + .await + .unwrap() + .send(format!("hello #{}", num).into_bytes()) + .unwrap(); } receiver.await; @@ -494,9 +513,9 @@ fn fallback_name_working() { fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { + reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + peer_id: node1.local_peer_id(), }], ..Default::default() }, @@ -565,7 +584,7 @@ fn ensure_listen_addresses_consistent_with_transport_not_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let boot_node = config::MultiaddrWithPeerId { + let boot_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; @@ -582,7 +601,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let boot_node = config::MultiaddrWithPeerId { + let boot_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; @@ -598,7 +617,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let reserved_node = config::MultiaddrWithPeerId { + let reserved_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; @@ -618,7 +637,7 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let reserved_node = config::MultiaddrWithPeerId { + let reserved_node = 
MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 1f54f05d7446f..f7e4d774ca812 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -32,7 +32,7 @@ use crate::{ protocol::message, service::NetworkService, utils::{interval, LruHashSet}, - Event, ExHashT, ObservedRole, + ExHashT, }; use codec::{Decode, Encode}; @@ -40,7 +40,11 @@ use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{ + config::ProtocolId, + protocol::event::{Event, ObservedRole}, + service::{NetworkEventStream, NetworkNotification, NetworkPeers}, +}; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, @@ -83,8 +87,6 @@ mod rep { pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); /// Reputation change when a peer sends us a bad transaction. pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); - /// We received an unexpected transaction packet. - pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); } struct Metrics { @@ -129,19 +131,34 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { protocol_name: Cow<'static, str>, + fallback_protocol_names: Vec>, } impl TransactionsHandlerPrototype { /// Create a new instance. - pub fn new(protocol_id: ProtocolId) -> Self { - Self { protocol_name: format!("/{}/transactions/1", protocol_id.as_ref()).into() } + pub fn new>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option, + ) -> Self { + let protocol_name = if let Some(fork_id) = fork_id { + format!("/{}/{}/transactions/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/transactions/1", hex::encode(genesis_hash)) + }; + let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); + + Self { + protocol_name: protocol_name.into(), + fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), + } } /// Returns the configuration of the set to put in the network configuration. pub fn set_config(&self) -> config::NonDefaultSetConfig { config::NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), - fallback_names: Vec::new(), + fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, set_config: config::SetConfig { in_peers: 0, @@ -160,11 +177,10 @@ impl TransactionsHandlerPrototype { pub fn build( self, service: Arc>, - local_role: config::Role, transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let event_stream = service.event_stream("transactions-handler").boxed(); + let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); let gossip_enabled = Arc::new(AtomicBool::new(false)); @@ -178,7 +194,6 @@ impl TransactionsHandlerPrototype { event_stream, peers: HashMap::new(), transaction_pool, - local_role, from_controller, metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) 
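// ---------------------------------------------------------------------------
// Editor's note: not part of the diff. What the renaming above produces, with
// an illustrative two-byte genesis hash and `ProtocolId` "dot": the
// genesis-hash-based name becomes the primary transactions protocol and the
// old ProtocolId-based name survives only as a fallback, so peers on older
// software can still connect.
fn transactions_protocol_names_demo() {
	let genesis_hash = [0xab, 0xcd];
	assert_eq!(
		format!("/{}/{}/transactions/1", hex::encode(genesis_hash), "my-fork"),
		"/abcd/my-fork/transactions/1",
	);
	assert_eq!(format!("/{}/transactions/1", "dot"), "/dot/transactions/1");
}
// ---------------------------------------------------------------------------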
@@ -247,7 +262,6 @@ pub struct TransactionsHandler { peers: HashMap>, transaction_pool: Arc>, gossip_enabled: Arc, - local_role: config::Role, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. metrics: Option, @@ -360,14 +374,6 @@ impl TransactionsHandler { /// Called when peer sends us new transactions fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { - // sending transaction to light node is considered a bad behavior - if matches!(self.local_role, config::Role::Light) { - debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who); - self.service.disconnect_peer(who, self.protocol_name.clone()); - self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); - return - } - // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { trace!(target: "sync", "{} Ignoring transactions while disabled", who); diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 883f828e7db51..23645b11795c3 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -54,16 +54,17 @@ pub fn build_transport( ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. let transport = if !memory_only { - let desktop_trans = tcp::TcpConfig::new().nodelay(true); + let tcp_config = tcp::GenTcpConfig::new().nodelay(true); + let desktop_trans = tcp::TcpTransport::new(tcp_config.clone()); let desktop_trans = websocket::WsConfig::new(desktop_trans) - .or_transport(tcp::TcpConfig::new().nodelay(true)); + .or_transport(tcp::TcpTransport::new(tcp_config.clone())); let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans)); EitherTransport::Left(if let Ok(dns) = dns_init { EitherTransport::Left(dns) } else { - let desktop_trans = tcp::TcpConfig::new().nodelay(true); + let desktop_trans = tcp::TcpTransport::new(tcp_config.clone()); let desktop_trans = websocket::WsConfig::new(desktop_trans) - .or_transport(tcp::TcpConfig::new().nodelay(true)); + .or_transport(tcp::TcpTransport::new(tcp_config)); EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport)) }) } else { diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 9185e812bc6cd..7c8f8adafd214 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -17,13 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } -either = "1.5.3" futures = "0.3.21" -libp2p = "0.45.1" +hex = "0.4.0" +libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" prost = "0.10" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 4cd69d1fac4f5..f4f10ac73c8d9 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -17,10 +17,7 @@ //! Helper for handling (i.e. answering) block requests from a remote peer via the //! `crate::request_responses::RequestResponsesBehaviour`. 
-use crate::{
-	message::BlockAttributes,
-	schema::v1::{block_request::FromBlock, BlockResponse, Direction},
-};
+use crate::schema::v1::{block_request::FromBlock, BlockResponse, Direction};
 use codec::{Decode, Encode};
 use futures::{
 	channel::{mpsc, oneshot},
@@ -34,6 +31,7 @@ use sc_client_api::BlockBackend;
 use sc_network_common::{
 	config::ProtocolId,
 	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
+	sync::message::BlockAttributes,
 };
 use sp_blockchain::HeaderBackend;
 use sp_runtime::{
@@ -64,9 +62,15 @@ mod rep {
 }
 
 /// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests.
-pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
+pub fn generate_protocol_config<Hash: AsRef<[u8]>>(
+	protocol_id: &ProtocolId,
+	genesis_hash: Hash,
+	fork_id: Option<&str>,
+) -> ProtocolConfig {
 	ProtocolConfig {
-		name: generate_protocol_name(protocol_id).into(),
+		name: generate_protocol_name(genesis_hash, fork_id).into(),
+		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
+			.collect(),
 		max_request_size: 1024 * 1024,
 		max_response_size: 16 * 1024 * 1024,
 		request_timeout: Duration::from_secs(20),
@@ -74,8 +78,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
 	}
 }
 
-/// Generate the block protocol name from chain specific protocol identifier.
-fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
+/// Generate the block protocol name from the genesis hash and fork id.
+fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
+	if let Some(fork_id) = fork_id {
+		format!("/{}/{}/sync/2", hex::encode(genesis_hash), fork_id)
+	} else {
+		format!("/{}/sync/2", hex::encode(genesis_hash))
+	}
+}
+
+/// Generate the legacy block protocol name from chain specific protocol identifier.
+fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String {
 	format!("/{}/sync/2", protocol_id.as_ref())
 }
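// ---------------------------------------------------------------------------
// Editor's note: not part of the diff. Names produced by the two helpers
// above, with an illustrative genesis hash: `generate_protocol_name` builds
// the primary name and the ProtocolId-based name is kept only as a fallback.
fn sync_protocol_names_demo() {
	let genesis_hash = [0xde, 0xad];
	assert_eq!(format!("/{}/sync/2", hex::encode(genesis_hash)), "/dead/sync/2");
	assert_eq!(
		format!("/{}/{}/sync/2", hex::encode(genesis_hash), "my-fork"),
		"/dead/my-fork/sync/2",
	);
}
// ---------------------------------------------------------------------------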
@@ -131,6 +144,7 @@ where
 	/// Create a new [`BlockRequestHandler`].
 	pub fn new(
 		protocol_id: &ProtocolId,
+		fork_id: Option<&str>,
 		client: Arc<Client>,
 		num_peer_hint: usize,
 	) -> (Self, ProtocolConfig) {
 		// Reserve enough request slots for one request per peer when we are at the maximum
 		// number of peers.
 		let (tx, request_receiver) = mpsc::channel(num_peer_hint);
 
-		let mut protocol_config = generate_protocol_config(protocol_id);
+		let mut protocol_config = generate_protocol_config(
+			protocol_id,
+			client
+				.block_hash(0u32.into())
+				.ok()
+				.flatten()
+				.expect("Genesis block exists; qed"),
+			fork_id,
+		);
 		protocol_config.inbound_queue = Some(tx);
 
 		let seen_requests = LruCache::new(num_peer_hint * 2);
@@ -228,16 +250,13 @@ where
 		debug!(
 			target: LOG_TARGET,
-			"Handling block request from {}: Starting at `{:?}` with maximum blocks \
-			of `{}`, direction `{:?}` and attributes `{:?}`.",
-			peer,
-			from_block_id,
-			max_blocks,
-			direction,
-			attributes,
+			"Handling block request from {peer}: Starting at `{from_block_id:?}` with \
+			maximum blocks of `{max_blocks}`, reputation_change: `{reputation_change:?}`, \
+			small_request `{small_request:?}`, direction `{direction:?}` and \
+			attributes `{attributes:?}`.",
 		);
 
-		let result = if reputation_change.is_none() || small_request {
+		let maybe_block_response = if reputation_change.is_none() || small_request {
 			let block_response = self.get_block_response(
 				attributes,
 				from_block_id,
@@ -261,9 +280,22 @@ where
 				}
 			}
 
+			Some(block_response)
+		} else {
+			None
+		};
+
+		debug!(
+			target: LOG_TARGET,
+			"Sending result of block request from {peer} starting at `{from_block_id:?}`: \
+			blocks: {:?}, data: {:?}",
+			maybe_block_response.as_ref().map(|res| res.blocks.len()),
+			maybe_block_response.as_ref().map(|res| res.encoded_len()),
+		);
+
+		let result = if let Some(block_response) = maybe_block_response {
 			let mut data = Vec::with_capacity(block_response.encoded_len());
 			block_response.encode(&mut data)?;
-
 			Ok(data)
 		} else {
 			Err(())
diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs
index b897184e7a44c..b8acd61a2009f 100644
--- a/client/network/sync/src/blocks.rs
+++ b/client/network/sync/src/blocks.rs
@@ -16,9 +16,9 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-use crate::message;
 use libp2p::PeerId;
 use log::trace;
+use sc_network_common::sync::message;
 use sp_runtime::traits::{Block as BlockT, NumberFor, One};
 use std::{
 	cmp,
@@ -39,6 +39,7 @@ pub struct BlockData<B: BlockT> {
 enum BlockRangeState<B: BlockT> {
 	Downloading { len: NumberFor<B>, downloading: u32 },
 	Complete(Vec<BlockData<B>>),
+	Queued { len: NumberFor<B> },
 }
 
 impl<B: BlockT> BlockRangeState<B> {
@@ -46,6 +47,7 @@
 		match *self {
 			Self::Downloading { len, .. } => len,
 			Self::Complete(ref blocks) => (blocks.len() as u32).into(),
+			Self::Queued { len } => len,
 		}
 	}
 }
@@ -56,12 +58,19 @@
 pub struct BlockCollection<B: BlockT> {
 	/// Downloaded blocks.
 	blocks: BTreeMap<NumberFor<B>, BlockRangeState<B>>,
 	peer_requests: HashMap<PeerId, NumberFor<B>>,
+	/// Block ranges downloaded and queued for import.
+	/// Maps start_hash => (start_num, end_num).
+	queued_blocks: HashMap<B::Hash, (NumberFor<B>, NumberFor<B>)>,
 }
 
 impl<B: BlockT> BlockCollection<B> {
 	/// Create a new instance.
 	pub fn new() -> Self {
-		Self { blocks: BTreeMap::new(), peer_requests: HashMap::new() }
+		Self {
+			blocks: BTreeMap::new(),
+			peer_requests: HashMap::new(),
+			queued_blocks: HashMap::new(),
+		}
 	}
 
 	/// Clear everything.
@@ -170,29 +179,48 @@ impl<B: BlockT> BlockCollection<B> {
 	}
 
 	/// Get a valid chain of blocks ordered in descending order and ready for importing into
-	/// blockchain.
-	pub fn drain(&mut self, from: NumberFor<B>) -> Vec<BlockData<B>> {
-		let mut drained = Vec::new();
-		let mut ranges = Vec::new();
+	/// the blockchain.
+	/// `from` is the maximum block number for the start of the range that we are interested in.
+ /// The function will return empty Vec if the first block ready is higher than `from`. + /// For each returned block hash `clear_queued` must be called at some later stage. + pub fn ready_blocks(&mut self, from: NumberFor) -> Vec> { + let mut ready = Vec::new(); let mut prev = from; - for (start, range_data) in &mut self.blocks { - match range_data { - BlockRangeState::Complete(blocks) if *start <= prev => { - prev = *start + (blocks.len() as u32).into(); - // Remove all elements from `blocks` and add them to `drained` - drained.append(blocks); - ranges.push(*start); + for (&start, range_data) in &mut self.blocks { + if start > prev { + break + } + let len = match range_data { + BlockRangeState::Complete(blocks) => { + let len = (blocks.len() as u32).into(); + prev = start + len; + if let Some(BlockData { block, .. }) = blocks.first() { + self.queued_blocks + .insert(block.hash, (start, start + (blocks.len() as u32).into())); + } + // Remove all elements from `blocks` and add them to `ready` + ready.append(blocks); + len }, + BlockRangeState::Queued { .. } => continue, _ => break, - } + }; + *range_data = BlockRangeState::Queued { len }; } + trace!(target: "sync", "{} blocks ready for import", ready.len()); + ready + } - for r in ranges { - self.blocks.remove(&r); + pub fn clear_queued(&mut self, hash: &B::Hash) { + if let Some((from, to)) = self.queued_blocks.remove(hash) { + let mut block_num = from; + while block_num < to { + self.blocks.remove(&block_num); + block_num += One::one(); + } + trace!(target: "sync", "Cleared blocks from {:?} to {:?}", from, to); } - trace!(target: "sync", "Drained {} blocks from {:?}", drained.len(), from); - drained } pub fn clear_peer_download(&mut self, who: &PeerId) { @@ -217,8 +245,8 @@ impl BlockCollection { #[cfg(test)] mod test { use super::{BlockCollection, BlockData, BlockRangeState}; - use crate::message; use libp2p::PeerId; + use sc_network_common::sync::message; use sp_core::H256; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; @@ -262,66 +290,66 @@ mod test { let peer2 = PeerId::random(); let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 0, 1, 200), Some(81..121)); bc.clear_peer_download(&peer1); - bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); - assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); + bc.insert(41, blocks[41..81].to_vec(), peer1); + assert_eq!(bc.ready_blocks(1), vec![]); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); - bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); + bc.insert(1, blocks[1..11].to_vec(), peer0); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(11..41)); assert_eq!( - bc.drain(1), + bc.ready_blocks(1), blocks[1..11] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) .collect::>() ); bc.clear_peer_download(&peer0); - bc.insert(11, blocks[11..41].to_vec(), 
peer0.clone()); + bc.insert(11, blocks[11..41].to_vec(), peer0); - let drained = bc.drain(12); + let ready = bc.ready_blocks(12); assert_eq!( - drained[..30], + ready[..30], blocks[11..41] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) .collect::>()[..] ); assert_eq!( - drained[30..], + ready[30..], blocks[41..81] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) .collect::>()[..] ); bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 80, 1, 200), Some(81..121)); bc.clear_peer_download(&peer2); - bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); + bc.insert(81, blocks[81..121].to_vec(), peer2); bc.clear_peer_download(&peer1); - bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); + bc.insert(121, blocks[121..150].to_vec(), peer1); - assert_eq!(bc.drain(80), vec![]); - let drained = bc.drain(81); + assert_eq!(bc.ready_blocks(80), vec![]); + let ready = bc.ready_blocks(81); assert_eq!( - drained[..40], + ready[..40], blocks[81..121] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer2) }) .collect::>()[..] ); assert_eq!( - drained[40..], + ready[40..], blocks[121..150] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) .collect::>()[..] ); } @@ -337,11 +365,70 @@ mod test { bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100)); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 000, 1, 200), Some(1..100)); + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 600, 1, 200), None); // too far ahead assert_eq!( - bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), + bc.needed_blocks(peer0, 128, 10000, 600, 1, 200000), Some(100 + 128..100 + 128 + 128) ); } + + #[test] + fn no_duplicate_requests_on_fork() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer = PeerId::random(); + + let blocks = generate_blocks(10); + + // count = 5, peer_best = 50, common = 39, max_parallel = 0, max_ahead = 200 + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); + + // got a response on the request for `40..45` + bc.clear_peer_download(&peer); + bc.insert(40, blocks[..5].to_vec(), peer); + + // our "node" started on a fork, with its current best = 47, which is > common + let ready = bc.ready_blocks(48); + assert_eq!( + ready, + blocks[..5] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) + .collect::>() + ); + + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); + } + + #[test] + fn clear_queued_subsequent_ranges() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer = PeerId::random(); + + let blocks = generate_blocks(10); + + // Request 2 ranges + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); + + // got a response on the request for `40..50` + bc.clear_peer_download(&peer); + bc.insert(40, blocks.to_vec(), peer); + 
+ // request any blocks starting from 1000 or lower. + let ready = bc.ready_blocks(1000); + assert_eq!( + ready, + blocks + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) + .collect::>() + ); + + bc.clear_queued(&blocks[0].hash); + assert!(bc.blocks.is_empty()); + assert!(bc.queued_blocks.is_empty()); + } } diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index c684d8e72783e..0506bd542ff3b 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -20,6 +20,7 @@ use crate::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; +use sc_network_common::sync::metrics::Metrics; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ @@ -56,15 +57,6 @@ pub(crate) struct ExtraRequests { request_type_name: &'static str, } -#[derive(Debug)] -pub struct Metrics { - pub pending_requests: u32, - pub active_requests: u32, - pub importing_requests: u32, - pub failed_requests: u32, - _priv: (), -} - impl ExtraRequests { pub(crate) fn new(request_type_name: &'static str) -> Self { Self { @@ -258,7 +250,6 @@ impl ExtraRequests { active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), - _priv: (), } } } @@ -455,16 +446,12 @@ mod tests { PeerSyncState::DownloadingJustification(r.0); } - let active = requests - .active_requests - .iter() - .map(|(p, &r)| (p.clone(), r)) - .collect::>(); + let active = requests.active_requests.iter().map(|(&p, &r)| (p, r)).collect::>(); for (peer, req) in &active { assert!(requests.failed_requests.get(req).is_none()); assert!(!requests.pending_requests.contains(req)); - assert!(requests.on_response::<()>(peer.clone(), None).is_none()); + assert!(requests.on_response::<()>(*peer, None).is_none()); assert!(requests.pending_requests.contains(req)); assert_eq!( 1, diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index b027d77827374..aae5f4de353fe 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -30,8 +30,7 @@ pub mod block_request_handler; pub mod blocks; -pub mod message; -pub mod schema; +mod schema; pub mod state; pub mod state_request_handler; pub mod warp; @@ -39,22 +38,28 @@ pub mod warp_request_handler; use crate::{ blocks::BlockCollection, - message::{BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse}, schema::v1::{StateRequest, StateResponse}, - state::{StateDownloadProgress, StateSync}, - warp::{ - EncodedProof, WarpProofImportResult, WarpProofRequest, WarpSync, WarpSyncPhase, - WarpSyncProgress, WarpSyncProvider, - }, + state::StateSync, + warp::{WarpProofImportResult, WarpSync}, }; -use codec::Encode; -use either::Either; +use codec::{Decode, DecodeAll, Encode}; use extra_requests::ExtraRequests; use futures::{stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt}; use libp2p::PeerId; use log::{debug, error, info, trace, warn}; +use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_network_common::sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, + FromBlock, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, 
WarpSyncProgress, WarpSyncProvider}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, +}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ @@ -71,7 +76,6 @@ use sp_runtime::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - fmt, ops::Range, pin::Pin, sync::Arc, @@ -283,15 +287,6 @@ impl PeerSync { } } -/// The sync status of a peer we are trying to sync with -#[derive(Debug)] -pub struct PeerInfo { - /// Their best block hash. - pub best_hash: B::Hash, - /// Their best block number. - pub best_number: NumberFor, -} - struct ForkTarget { number: NumberFor, parent_hash: Option, @@ -330,108 +325,6 @@ impl PeerSyncState { } } -/// Reported sync state. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading, -} - -/// Syncing status and statistics. -#[derive(Clone)] -pub struct Status { - /// Current global sync state. - pub state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, - /// Number of blocks queued for import - pub queued_blocks: u32, - /// State sync status in progress, if any. - pub state_sync: Option, - /// Warp sync in progress, if any. - pub warp_sync: Option>, -} - -/// A peer did not behave as expected and should be reported. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange); - -impl fmt::Display for BadPeer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) - } -} - -impl std::error::Error for BadPeer {} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockData { - /// The block should be imported. - Import(BlockOrigin, Vec>), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), -} - -impl OnBlockData { - /// Returns `self` as request. - #[cfg(test)] - fn into_request(self) -> Option<(PeerId, BlockRequest)> { - if let Self::Request(peer, req) = self { - Some((peer, req)) - } else { - None - } - } -} - -/// Result of [`ChainSync::on_state_data`]. -#[derive(Debug)] -pub enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Result of [`ChainSync::poll_block_announce_validation`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PollBlockAnnounceValidation { - /// The announcement failed at validation. - /// - /// The peer reputation should be decreased. - Failure { - /// Who sent the processed block announcement? - who: PeerId, - /// Should the peer be disconnected? - disconnect: bool, - }, - /// The announcement does not require further handling. - Nothing { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, - /// The announcement header should be imported. - ImportHeader { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? 
- is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, - /// The block announcement should be skipped. - Skip, -} - /// Result of [`ChainSync::block_announce_validation`]. #[derive(Debug, Clone, PartialEq, Eq)] enum PreValidateBlockAnnounce { @@ -467,28 +360,6 @@ enum PreValidateBlockAnnounce { Skip, } -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { peer: PeerId, hash: B::Hash, number: NumberFor, justifications: Justifications }, -} - -/// Operation mode. -#[derive(Debug, PartialEq, Eq)] -pub enum SyncMode { - // Sync headers only - Light, - // Sync headers and block bodies - Full, - // Sync headers and the last finalied state - LightState { storage_chain_mode: bool, skip_proofs: bool }, - // Warp sync mode. - Warp, -} - /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. enum HasSlotForBlockAnnounceValidation { /// Yes, there is a slot for the block announce validation. @@ -499,7 +370,7 @@ enum HasSlotForBlockAnnounceValidation { MaximumPeerSlotsReached, } -impl ChainSync +impl ChainSyncT for ChainSync where B: BlockT, Client: HeaderBackend @@ -510,88 +381,14 @@ where + Sync + 'static, { - /// Create a new instance. - pub fn new( - mode: SyncMode, - client: Arc, - block_announce_validator: Box + Send>, - max_parallel_downloads: u32, - warp_sync_provider: Option>>, - ) -> Result { - let mut sync = Self { - client, - peers: HashMap::new(), - blocks: BlockCollection::new(), - best_queued_hash: Default::default(), - best_queued_number: Zero::zero(), - extra_justifications: ExtraRequests::new("justification"), - mode, - queue_blocks: Default::default(), - fork_targets: Default::default(), - allowed_requests: Default::default(), - block_announce_validator, - max_parallel_downloads, - downloaded_blocks: 0, - block_announce_validation: Default::default(), - block_announce_validation_per_peer_stats: Default::default(), - state_sync: None, - warp_sync: None, - warp_sync_provider, - import_existing: false, - gap_sync: None, - }; - sync.reset_sync_start_point()?; - Ok(sync) - } - - fn required_block_attributes(&self) -> BlockAttributes { - match self.mode { - SyncMode::Full => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | - BlockAttributes::JUSTIFICATION | - BlockAttributes::INDEXED_BODY, - } - } - - fn skip_execution(&self) -> bool { - match self.mode { - SyncMode::Full => false, - SyncMode::Light => true, - SyncMode::LightState { .. } => true, - SyncMode::Warp => true, - } - } - - /// Returns the state of the sync of the given peer. - /// - /// Returns `None` if the peer is unknown. - pub fn peer_info(&self, who: &PeerId) -> Option> { + fn peer_info(&self, who: &PeerId) -> Option> { self.peers .get(who) .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) } - /// Returns the best seen block. 
- fn best_seen(&self) -> Option> { - let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); - - if best_seens.is_empty() { - None - } else { - let middle = best_seens.len() / 2; - - // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) - } - } - /// Returns the current sync status. - pub fn status(&self) -> Status { + fn status(&self) -> SyncStatus { let best_seen = self.best_seen(); let sync_state = if let Some(n) = best_seen { // A chain is classified as downloading if the provided best block is @@ -617,7 +414,7 @@ where _ => None, }; - Status { + SyncStatus { state: sync_state, best_seen_block: best_seen, num_peers: self.peers.len() as u32, @@ -627,29 +424,22 @@ where } } - /// Number of active forks requests. This includes - /// requests that are pending or could be issued right away. - pub fn num_sync_requests(&self) -> usize { + fn num_sync_requests(&self) -> usize { self.fork_targets .values() .filter(|f| f.number <= self.best_queued_number) .count() } - /// Number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { + fn num_downloaded_blocks(&self) -> usize { self.downloaded_blocks } - /// Returns the current number of peers stored within this state machine. - pub fn num_peers(&self) -> usize { + fn num_peers(&self) -> usize { self.peers.len() } - /// Handle a new connected peer. - /// - /// Call this method whenever we connect to a new peer. - pub fn new_peer( + fn new_peer( &mut self, who: PeerId, best_hash: B::Hash, @@ -773,27 +563,22 @@ where } } - /// Signal that a new best block has been imported. - /// `ChainSync` state with that information. - pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { self.on_block_queued(best_hash, best_number); } - /// Schedule a justification request for the given block. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; self.extra_justifications .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) } - /// Clear all pending justification requests. - pub fn clear_justification_requests(&mut self) { + fn clear_justification_requests(&mut self) { self.extra_justifications.reset(); } - /// Request syncing for the given block from given set of peers. // The implementation is similar to on_block_announce with unknown parent hash. - pub fn set_sync_fork_request( + fn set_sync_fork_request( &mut self, mut peers: Vec, hash: &B::Hash, @@ -845,13 +630,12 @@ where .extend(peers); } - /// Get an iterator over all scheduled justification requests. 
- pub fn justification_requests( + fn justification_requests( &mut self, - ) -> impl Iterator)> + '_ { + ) -> Box)> + '_> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); - std::iter::from_fn(move || { + Box::new(std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(peers) { peers .get_mut(&peer) @@ -859,33 +643,32 @@ where "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", ) .state = PeerSyncState::DownloadingJustification(request.0); - let req = message::generic::BlockRequest { + let req = BlockRequest:: { id: 0, fields: BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Hash(request.0), + from: FromBlock::Hash(request.0), to: None, - direction: message::Direction::Ascending, + direction: Direction::Ascending, max: Some(1), }; Some((peer, req)) } else { None } - }) + })) } - /// Get an iterator over all block requests of all peers. - pub fn block_requests(&mut self) -> impl Iterator)> + '_ { + fn block_requests(&mut self) -> Box)> + '_> { if self.allowed_requests.is_empty() || self.state_sync.is_some() || self.mode == SyncMode::Warp { - return Either::Left(std::iter::empty()) + return Box::new(std::iter::empty()) } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: "sync", "Too many blocks in the queue."); - return Either::Left(std::iter::empty()) + return Box::new(std::iter::empty()) } let major_sync = self.status().state == SyncState::Downloading; let attrs = self.required_block_attributes(); @@ -982,11 +765,10 @@ where None } }); - Either::Right(iter) + Box::new(iter) } - /// Get a state request, if any. - pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { return None } @@ -1007,7 +789,7 @@ where let request = sync.next_request(); trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, OpaqueStateRequest(Box::new(request)))) } } } @@ -1023,7 +805,7 @@ where trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); peer.state = PeerSyncState::DownloadingState; self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, OpaqueStateRequest(Box::new(request)))) } } } @@ -1031,8 +813,7 @@ where None } - /// Get a warp sync request, if any. - pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -1063,14 +844,7 @@ where None } - /// Handle a response from the remote to a block request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// or `None` if data comes from the block announcement. - /// - /// If this corresponds to a valid block, this outputs the block that - /// must be imported in the import queue. 
- pub fn on_block_data( + fn on_block_data( &mut self, who: &PeerId, request: Option>, @@ -1080,10 +854,7 @@ where let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; - if request - .as_ref() - .map_or(false, |r| r.direction == message::Direction::Descending) - { + if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { trace!(target: "sync", "Reversing incoming block list"); blocks.reverse() } @@ -1098,7 +869,7 @@ where { self.blocks.insert(start_block, blocks, *who); } - self.drain_blocks() + self.ready_blocks() }, PeerSyncState::DownloadingGap(_) => { peer.state = PeerSyncState::Available; @@ -1112,7 +883,7 @@ where gap = true; let blocks: Vec<_> = gap_sync .blocks - .drain(gap_sync.best_queued_number + One::one()) + .ready_blocks(gap_sync.best_queued_number + One::one()) .into_iter() .map(|block_data| { let justifications = @@ -1297,14 +1068,20 @@ where Ok(self.validate_and_queue_blocks(new_blocks, gap)) } - /// Handle a response from the remote to a state request that we made. - /// - /// Returns next request if any. - pub fn on_state_data( + fn on_state_data( &mut self, who: &PeerId, - response: StateResponse, + response: OpaqueStateResponse, ) -> Result, BadPeer> { + let response: Box = response.0.downcast().map_err(|_error| { + error!( + target: "sync", + "Failed to downcast opaque state response, this is an implementation bug." + ); + + BadPeer(*who, rep::BAD_RESPONSE) + })?; + if let Some(peer) = self.peers.get_mut(who) { if let PeerSyncState::DownloadingState = peer.state { peer.state = PeerSyncState::Available; @@ -1319,7 +1096,7 @@ where response.entries.len(), response.proof.len(), ); - sync.import(response) + sync.import(*response) } else if let Some(sync) = &mut self.warp_sync { debug!( target: "sync", @@ -1328,7 +1105,7 @@ where response.entries.len(), response.proof.len(), ); - sync.import_state(response) + sync.import_state(*response) } else { debug!(target: "sync", "Ignored obsolete state response from {}", who); return Err(BadPeer(*who, rep::NOT_REQUESTED)) @@ -1360,14 +1137,7 @@ where } } - /// Handle a response from the remote to a warp proof request that we made. - /// - /// Returns next request. 
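The `OpaqueStateRequest`/`OpaqueStateResponse` wrappers used in `state_request` and `on_state_data` above type-erase the crate-private protobuf types so they can cross the new trait boundary; the receiver recovers the concrete type with `Box<dyn Any>::downcast` and reports failure as an implementation bug. A runnable sketch of that round trip, with hypothetical stand-in types:

```rust
use std::any::Any;

// Stand-ins for the real opaque wrapper and protobuf type (both hypothetical).
struct OpaqueStateResponse(Box<dyn Any + Send>);

struct StateResponse {
    entries: usize,
}

fn import(response: OpaqueStateResponse) -> Result<usize, String> {
    // Recover the concrete type; a mismatch means the caller handed us a
    // wrapper built from some other type, i.e. an implementation bug.
    let response: Box<StateResponse> = response
        .0
        .downcast()
        .map_err(|_| "failed to downcast opaque state response".to_string())?;
    Ok(response.entries)
}

fn main() {
    let opaque = OpaqueStateResponse(Box::new(StateResponse { entries: 3 }));
    assert_eq!(import(opaque).unwrap(), 3);
}
```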
- pub fn on_warp_sync_data( - &mut self, - who: &PeerId, - response: EncodedProof, - ) -> Result<(), BadPeer> { + fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer> { if let Some(peer) = self.peers.get_mut(who) { if let PeerSyncState::DownloadingWarpProof = peer.state { peer.state = PeerSyncState::Available; @@ -1396,51 +1166,7 @@ where } } - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> OnBlockData { - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!( - target: "sync", - "Ignoring {} blocks that are already queued", - orig_len - new_blocks.len(), - ); - } - - let origin = if !gap && self.status().state != SyncState::Downloading { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - - if let Some((h, n)) = new_blocks - .last() - .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) - { - trace!( - target:"sync", - "Accepted {} blocks ({:?}) with origin {:?}", - new_blocks.len(), - h, - origin, - ); - self.on_block_queued(h, n) - } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - OnBlockData::Import(origin, new_blocks) - } - - /// Handle a response from the remote to a justification request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// - /// Returns `Some` if this produces a justification that must be imported - /// into the import queue. - pub fn on_block_justification( + fn on_block_justification( &mut self, who: PeerId, response: BlockResponse, @@ -1495,18 +1221,12 @@ where Ok(OnBlockJustification::Nothing) } - /// A batch of blocks have been processed, with or without errors. - /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. - /// - /// `peer_info` is passed in case of a restart. 
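One behavioral detail worth calling out in `on_blocks_processed` below: after the first failed import the loop now `break`s out of the batch instead of `continue`-ing past every remaining result, which had the same observable effect but kept iterating uselessly. A reduced sketch of the fail-fast loop:

```rust
// Reduced sketch of the batch loop: abandon the rest of the batch outright
// after the first error rather than skipping entries one by one.
fn process_batch(results: Vec<Result<u32, String>>) -> (Vec<u32>, bool) {
    let mut imported = Vec::new();
    let mut has_error = false;
    for result in results {
        if has_error {
            break // was `continue`: same outcome, wasted iterations
        }
        match result {
            Ok(number) => imported.push(number),
            Err(error) => {
                eprintln!("import failed: {error}");
                has_error = true;
            },
        }
    }
    (imported, has_error)
}

fn main() {
    let (imported, failed) = process_batch(vec![Ok(1), Err("bad".into()), Ok(3)]);
    assert_eq!(imported, vec![1]);
    assert!(failed);
}
```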
- pub fn on_blocks_processed( + fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> impl Iterator), BadPeer>> { + ) -> Box), BadPeer>>> { trace!(target: "sync", "Imported {} of {}", imported, count); let mut output = Vec::new(); @@ -1514,10 +1234,14 @@ where let mut has_error = false; for (_, hash) in &results { self.queue_blocks.remove(hash); + self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } } for (result, hash) in results { if has_error { - continue + break } if result.is_err() { @@ -1525,11 +1249,10 @@ where } match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => { - if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { - peer.update_common_number(number); - } - }, + Ok(BlockImportStatus::ImportedKnown(number, who)) => + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + }, Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( @@ -1558,8 +1281,8 @@ where } } - if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { - peer.update_common_number(number); + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); } let state_sync_complete = self.state_sync.as_ref().map_or(false, |s| s.target() == hash); @@ -1645,20 +1368,17 @@ where } self.allowed_requests.set_all(); - output.into_iter() + Box::new(output.into_iter()) } - /// Call this when a justification has been processed by the import queue, - /// with or without errors. - pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; self.extra_justifications .try_finalize_root((hash, number), finalization_result, true); self.allowed_requests.set_all(); } - /// Notify about finalization of the given block. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { is_descendent_of(&**client, base, block) @@ -1696,115 +1416,37 @@ where } } - /// Called when a block has been queued for import. - /// - /// Updates our internal state for best queued block and then goes - /// through all peers to update our view of their state as well. - fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { - if self.fork_targets.remove(hash).is_some() { - trace!(target: "sync", "Completed fork sync {:?}", hash); - } - if let Some(gap_sync) = &mut self.gap_sync { - if number > gap_sync.best_queued_number && number <= gap_sync.target { - gap_sync.best_queued_number = number; - } - } - if number > self.best_queued_number { - self.best_queued_number = number; - self.best_queued_hash = *hash; - // Update common blocks - for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch { .. } = peer.state { - // Wait for ancestry search to complete first. 
- continue + fn push_block_announce_validation( + &mut self, + who: PeerId, + hash: B::Hash, + announce: BlockAnnounce, + is_best: bool, + ) { + let header = &announce.header; + let number = *header.number(); + debug!( + target: "sync", + "Pre-validating received block announcement {:?} with number {:?} from {}", + hash, + number, + who, + ); + + if number.is_zero() { + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored genesis block (#0) announcement from {}: {}", + who, + hash, + ); + PreValidateBlockAnnounce::Skip } - let new_common_number = - if peer.best_number >= number { number } else { peer.best_number }; - trace!( - target: "sync", - "Updating peer {} info, ours={}, common={}->{}, their best={}", - n, - number, - peer.common_number, - new_common_number, - peer.best_number, - ); - peer.common_number = new_common_number; - } - } - self.allowed_requests.set_all(); - } - - /// Checks if there is a slot for a block announce validation. - /// - /// The total number and the number per peer of concurrent block announce validations - /// is capped. - /// - /// Returns [`HasSlotForBlockAnnounceValidation`] to inform about the result. - /// - /// # Note - /// - /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the - /// validation is finished to clear the slot. - fn has_slot_for_block_announce_validation( - &mut self, - peer: &PeerId, - ) -> HasSlotForBlockAnnounceValidation { - if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { - return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached - } - - match self.block_announce_validation_per_peer_stats.entry(*peer) { - Entry::Vacant(entry) => { - entry.insert(1); - HasSlotForBlockAnnounceValidation::Yes - }, - Entry::Occupied(mut entry) => { - if *entry.get() < MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { - *entry.get_mut() += 1; - HasSlotForBlockAnnounceValidation::Yes - } else { - HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached - } - }, - } - } - - /// Push a block announce validation. - /// - /// It is required that [`ChainSync::poll_block_announce_validation`] is called - /// to check for finished block announce validations. - pub fn push_block_announce_validation( - &mut self, - who: PeerId, - hash: B::Hash, - announce: BlockAnnounce, - is_best: bool, - ) { - let header = &announce.header; - let number = *header.number(); - debug!( - target: "sync", - "Pre-validating received block announcement {:?} with number {:?} from {}", - hash, - number, - who, - ); - - if number.is_zero() { - self.block_announce_validation.push( - async move { - warn!( - target: "sync", - "💔 Ignored genesis block (#0) announcement from {}: {}", - who, - hash, - ); - PreValidateBlockAnnounce::Skip - } - .boxed(), - ); - return + .boxed(), + ); + return } // Check if there is a slot for this block announce validation. @@ -1877,16 +1519,7 @@ where ); } - /// Poll block announce validation. - /// - /// Block announce validations can be pushed by using - /// [`ChainSync::push_block_announce_validation`]. - /// - /// This should be polled until it returns [`Poll::Pending`]. - /// - /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to - /// import passed header (call `on_block_data`). The network request isn't sent in this case. 
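The `push_block_announce_validation` hunk above illustrates the validation queue's design: every announcement, even one rejected immediately (a genesis-block announce), becomes a boxed future pushed onto the same queue, so the polling side sees one uniform type. A hedged sketch of that shape, assuming the `futures` crate (which this crate already depends on):

```rust
use futures::{executor::block_on, future::BoxFuture, FutureExt};

// Hypothetical mirror of `PreValidateBlockAnnounce`.
enum PreValidate {
    Skip,
    Process(u64),
}

fn validate(number: u64) -> BoxFuture<'static, PreValidate> {
    if number == 0 {
        // Cheap rejection: still a future, so it flows through the same queue.
        async { PreValidate::Skip }.boxed()
    } else {
        async move { PreValidate::Process(number) }.boxed()
    }
}

fn main() {
    match block_on(validate(0)) {
        PreValidate::Skip => println!("ignored genesis announcement"),
        PreValidate::Process(n) => println!("validating #{n}"),
    }
}
```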
- pub fn poll_block_announce_validation( + fn poll_block_announce_validation( &mut self, cx: &mut std::task::Context, ) -> Poll> { @@ -1899,6 +1532,353 @@ where } } + fn peer_disconnected(&mut self, who: &PeerId) -> Option> { + self.blocks.clear_peer_download(who); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who) + } + self.peers.remove(who); + self.extra_justifications.peer_disconnected(who); + self.allowed_requests.set_all(); + self.fork_targets.retain(|_, target| { + target.peers.remove(who); + !target.peers.is_empty() + }); + let blocks = self.ready_blocks(); + (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + } + + fn metrics(&self) -> Metrics { + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + justifications: self.extra_justifications.metrics(), + } + } + + /// Create implementation-specific block request. + fn create_opaque_block_request(&self, request: &BlockRequest) -> OpaqueBlockRequest { + OpaqueBlockRequest(Box::new(schema::v1::BlockRequest { + fields: request.fields.to_be_u32(), + from_block: match request.from { + FromBlock::Hash(h) => Some(schema::v1::block_request::FromBlock::Hash(h.encode())), + FromBlock::Number(n) => + Some(schema::v1::block_request::FromBlock::Number(n.encode())), + }, + to_block: request.to.map(|h| h.encode()).unwrap_or_default(), + direction: request.direction as i32, + max_blocks: request.max.unwrap_or(0), + support_multiple_justifications: true, + })) + } + + fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String> { + let request: &schema::v1::BlockRequest = request.0.downcast_ref().ok_or_else(|| { + "Failed to downcast opaque block request during encoding, this is an \ + implementation bug." + .to_string() + })?; + + Ok(request.encode_to_vec()) + } + + fn decode_block_response(&self, response: &[u8]) -> Result { + let response = schema::v1::BlockResponse::decode(response) + .map_err(|error| format!("Failed to decode block response: {error}"))?; + + Ok(OpaqueBlockResponse(Box::new(response))) + } + + fn block_response_into_blocks( + &self, + request: &BlockRequest, + response: OpaqueBlockResponse, + ) -> Result>, String> { + let response: Box = response.0.downcast().map_err(|_error| { + "Failed to downcast opaque block response during conversion, this is an \ + implementation bug." + .to_string() + })?; + + response + .blocks + .into_iter() + .map(|block_data| { + Ok(BlockData:: { + hash: Decode::decode(&mut block_data.hash.as_ref())?, + header: if !block_data.header.is_empty() { + Some(Decode::decode(&mut block_data.header.as_ref())?)
+ } else { + None + }, + body: if request.fields.contains(BlockAttributes::BODY) { + Some( + block_data + .body + .iter() + .map(|body| Decode::decode(&mut body.as_ref())) + .collect::, _>>()?, + ) + } else { + None + }, + indexed_body: if request.fields.contains(BlockAttributes::INDEXED_BODY) { + Some(block_data.indexed_body) + } else { + None + }, + receipt: if !block_data.receipt.is_empty() { + Some(block_data.receipt) + } else { + None + }, + message_queue: if !block_data.message_queue.is_empty() { + Some(block_data.message_queue) + } else { + None + }, + justification: if !block_data.justification.is_empty() { + Some(block_data.justification) + } else if block_data.is_empty_justification { + Some(Vec::new()) + } else { + None + }, + justifications: if !block_data.justifications.is_empty() { + Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) + } else { + None + }, + }) + }) + .collect::>() + .map_err(|error: codec::Error| error.to_string()) + } + + fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String> { + let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { + "Failed to downcast opaque state request during encoding, this is an \ + implementation bug." + .to_string() + })?; + + Ok(request.encode_to_vec()) + } + + fn decode_state_response(&self, response: &[u8]) -> Result { + let response = StateResponse::decode(response) + .map_err(|error| format!("Failed to decode state response: {error}"))?; + + Ok(OpaqueStateResponse(Box::new(response))) + } +} + +impl ChainSync +where + Self: ChainSyncT, + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Create a new instance. + pub fn new( + mode: SyncMode, + client: Arc, + block_announce_validator: Box + Send>, + max_parallel_downloads: u32, + warp_sync_provider: Option>>, + ) -> Result { + let mut sync = Self { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), + extra_justifications: ExtraRequests::new("justification"), + mode, + queue_blocks: Default::default(), + fork_targets: Default::default(), + allowed_requests: Default::default(), + block_announce_validator, + max_parallel_downloads, + downloaded_blocks: 0, + block_announce_validation: Default::default(), + block_announce_validation_per_peer_stats: Default::default(), + state_sync: None, + warp_sync: None, + warp_sync_provider, + import_existing: false, + gap_sync: None, + }; + sync.reset_sync_start_point()?; + Ok(sync) + } + + /// Returns the best seen block number if we don't have that block yet, `None` otherwise. + fn best_seen(&self) -> Option> { + let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); + + if best_seens.is_empty() { + None + } else { + let middle = best_seens.len() / 2; + + // Not the "perfect median" when we have an even number of peers. + let median = *best_seens.select_nth_unstable(middle).1; + if median > self.best_queued_number { + Some(median) + } else { + None + } + } + } + + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + SyncMode::LightState { storage_chain_mode: false, ..
} | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, + } + } + + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::Light => true, + SyncMode::LightState { .. } => true, + SyncMode::Warp => true, + } + } + + fn validate_and_queue_blocks( + &mut self, + mut new_blocks: Vec>, + gap: bool, + ) -> OnBlockData { + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!( + target: "sync", + "Ignoring {} blocks that are already queued", + orig_len - new_blocks.len(), + ); + } + + let origin = if !gap && self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!( + target:"sync", + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); + self.on_block_queued(h, n) + } + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + OnBlockData::Import(origin, new_blocks) + } + + fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.update_common_number(new_common); + } + } + + /// Called when a block has been queued for import. + /// + /// Updates our internal state for best queued block and then goes + /// through all peers to update our view of their state as well. + fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + if self.fork_targets.remove(hash).is_some() { + trace!(target: "sync", "Completed fork sync {:?}", hash); + } + if let Some(gap_sync) = &mut self.gap_sync { + if number > gap_sync.best_queued_number && number <= gap_sync.target { + gap_sync.best_queued_number = number; + } + } + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + // Wait for ancestry search to complete first. + continue + } + let new_common_number = + if peer.best_number >= number { number } else { peer.best_number }; + trace!( + target: "sync", + "Updating peer {} info, ours={}, common={}->{}, their best={}", + n, + number, + peer.common_number, + new_common_number, + peer.best_number, + ); + peer.common_number = new_common_number; + } + } + self.allowed_requests.set_all(); + } + + /// Checks if there is a slot for a block announce validation. + /// + /// The total number and the number per peer of concurrent block announce validations + /// is capped. + /// + /// Returns [`HasSlotForBlockAnnounceValidation`] to inform about the result. + /// + /// # Note + /// + /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the + /// validation is finished to clear the slot. 
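For reference, the reworked `best_seen` a few hunks above now also checks the median against our own best queued number instead of leaving that comparison to the caller: it reports the approximate median of the peers' best numbers only when that median is ahead of our own chain. A standalone sketch of the computation (plain `u64` instead of `NumberFor<B>`):

```rust
// Take the middle element of the peers' best numbers (not a perfect median
// for an even count) and report it only when it is ahead of our own chain.
fn best_seen(peer_bests: &[u64], best_queued: u64) -> Option<u64> {
    let mut bests = peer_bests.to_vec();
    if bests.is_empty() {
        return None
    }
    let middle = bests.len() / 2;
    let median = *bests.select_nth_unstable(middle).1;
    (median > best_queued).then_some(median)
}

fn main() {
    assert_eq!(best_seen(&[10, 50, 30], 20), Some(30));
    // Median not ahead of us: we no longer count as "downloading".
    assert_eq!(best_seen(&[10, 50, 30], 30), None);
    assert_eq!(best_seen(&[], 0), None);
}
```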
+ fn has_slot_for_block_announce_validation( + &mut self, + peer: &PeerId, + ) -> HasSlotForBlockAnnounceValidation { + if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { + return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached + } + + match self.block_announce_validation_per_peer_stats.entry(*peer) { + Entry::Vacant(entry) => { + entry.insert(1); + HasSlotForBlockAnnounceValidation::Yes + }, + Entry::Occupied(mut entry) => { + if *entry.get() < MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { + *entry.get_mut() += 1; + HasSlotForBlockAnnounceValidation::Yes + } else { + HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached + } + }, + } + } + /// Should be called when a block announce validation is finished, to update the slots /// of the peer that send the block announce. fn peer_block_announce_validation_finished( @@ -1993,11 +1973,11 @@ where // is either one further ahead or it's the one they just announced, if we know about it. if is_best { if known && self.best_queued_number >= number { - peer.update_common_number(number); + self.update_peer_common_number(&who, number); } else if announce.header.parent_hash() == &self.best_queued_hash || known_parent && self.best_queued_number >= number { - peer.update_common_number(number - One::one()); + self.update_peer_common_number(&who, number - One::one()); } } self.allowed_requests.add(&who); @@ -2056,29 +2036,6 @@ where PollBlockAnnounceValidation::Nothing { is_best, who, announce } } - /// Call when a peer has disconnected. - /// Canceled obsolete block request may result in some blocks being ready for - /// import, so this functions checks for such blocks and returns them. - pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> { - self.blocks.clear_peer_download(who); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who) - } - self.peers.remove(who); - self.extra_justifications.peer_disconnected(who); - self.allowed_requests.set_all(); - self.fork_targets.retain(|_, target| { - target.peers.remove(who); - !target.peers.is_empty() - }); - let blocks = self.drain_blocks(); - if !blocks.is_empty() { - Some(self.validate_and_queue_blocks(blocks, false)) - } else { - None - } - } - /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. @@ -2181,20 +2138,10 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } - /// Return some key metrics. - pub fn metrics(&self) -> Metrics { - Metrics { - queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), - fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - justifications: self.extra_justifications.metrics(), - _priv: (), - } - } - - /// Drain the downloaded block set up to the first gap. - fn drain_blocks(&mut self) -> Vec> { + /// Get the set of downloaded blocks that are ready to be queued for import. 
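The slot accounting in `has_slot_for_block_announce_validation` above caps in-flight validations both globally and per peer, maintaining the per-peer counter with the `HashMap` entry API. A self-contained sketch (the caps here are illustrative, not the crate's constants):

```rust
use std::collections::{hash_map::Entry, HashMap};

const MAX_TOTAL: usize = 256; // hypothetical global cap
const MAX_PER_PEER: usize = 4; // hypothetical per-peer cap

fn try_take_slot(in_flight: usize, per_peer: &mut HashMap<u64, usize>, peer: u64) -> bool {
    // Global cap first, then the per-peer counter.
    if in_flight >= MAX_TOTAL {
        return false
    }
    match per_peer.entry(peer) {
        Entry::Vacant(entry) => {
            entry.insert(1);
            true
        },
        Entry::Occupied(mut entry) =>
            if *entry.get() < MAX_PER_PEER {
                *entry.get_mut() += 1;
                true
            } else {
                false
            },
    }
}

fn main() {
    let mut per_peer = HashMap::new();
    assert!((0..MAX_PER_PEER).all(|_| try_take_slot(0, &mut per_peer, 7)));
    assert!(!try_take_slot(0, &mut per_peer, 7)); // peer 7 is out of slots
}
```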
+ fn ready_blocks(&mut self) -> Vec> { self.blocks - .drain(self.best_queued_number + One::one()) + .ready_blocks(self.best_queued_number + One::one()) .into_iter() .map(|block_data| { let justifications = block_data @@ -2228,23 +2175,15 @@ fn legacy_justification_mapping( justification.map(|just| (*b"FRNK", just).into()) } -#[derive(Debug)] -pub struct Metrics { - pub queued_blocks: u32, - pub fork_targets: u32, - pub justifications: extra_requests::Metrics, - _priv: (), -} - /// Request the ancestry for a block. Sends a request for header and justification for the given /// block number. Used during ancestry search. fn ancestry_request(block: NumberFor) -> BlockRequest { - message::generic::BlockRequest { + BlockRequest:: { id: 0, fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Number(block), + from: FromBlock::Number(block), to: None, - direction: message::Direction::Ascending, + direction: Direction::Ascending, max: Some(1), } } @@ -2322,7 +2261,7 @@ fn peer_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: message::BlockAttributes, + attrs: BlockAttributes, max_parallel_downloads: u32, finalized: NumberFor, best_num: NumberFor, @@ -2350,17 +2289,17 @@ fn peer_block_request( let last = range.end.saturating_sub(One::one()); let from = if peer.best_number == last { - message::FromBlock::Hash(peer.best_hash) + FromBlock::Hash(peer.best_hash) } else { - message::FromBlock::Number(last) + FromBlock::Number(last) }; - let request = message::generic::BlockRequest { + let request = BlockRequest:: { id: 0, fields: attrs, from, to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2372,7 +2311,7 @@ fn peer_gap_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: message::BlockAttributes, + attrs: BlockAttributes, target: NumberFor, common_number: NumberFor, ) -> Option<(Range>, BlockRequest)> { @@ -2387,14 +2326,14 @@ fn peer_gap_block_request( // The end is not part of the range. let last = range.end.saturating_sub(One::one()); - let from = message::FromBlock::Number(last); + let from = FromBlock::Number(last); - let request = message::generic::BlockRequest { + let request = BlockRequest:: { id: 0, fields: attrs, from, to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; Some((range, request)) @@ -2406,7 +2345,7 @@ fn fork_sync_request( targets: &mut HashMap>, best_num: NumberFor, finalized: NumberFor, - attributes: message::BlockAttributes, + attributes: BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, ) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { @@ -2439,12 +2378,12 @@ fn fork_sync_request( trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); return Some(( *hash, - message::generic::BlockRequest { + BlockRequest:: { id: 0, fields: attributes, - from: message::FromBlock::Hash(*hash), + from: FromBlock::Hash(*hash), to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some(count), }, )) @@ -2479,7 +2418,7 @@ where /// /// It is expected that `blocks` are in ascending order. 
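The request builders above (`peer_block_request`, `peer_gap_block_request`, `fork_sync_request`) all issue descending requests: ask for the last block of the chosen window and walk backwards, with `max` sized to the whole window. A toy version of that arithmetic, under the same end-exclusive `Range` convention:

```rust
use std::ops::Range;

// Returns the (starting block, max count) pair for a descending request over
// an end-exclusive range; values are illustrative `u64`s, not `NumberFor<B>`.
fn descending_request(range: Range<u64>) -> (u64, u32) {
    // The end is not part of the range, so the first block requested is
    // `range.end - 1`; `max` covers every block in the window.
    let from = range.end.saturating_sub(1);
    let max = (range.end - range.start) as u32;
    (from, max)
}

fn main() {
    // A window covering blocks 100..=131 starts at 131 and asks for 32 blocks.
    assert_eq!(descending_request(100..132), (131, 32));
}
```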
fn validate_blocks( - blocks: &Vec>, + blocks: &Vec>, who: &PeerId, request: Option>, ) -> Result>, BadPeer> { @@ -2496,16 +2435,13 @@ fn validate_blocks( return Err(BadPeer(*who, rep::NOT_REQUESTED)) } - let block_header = if request.direction == message::Direction::Descending { - blocks.last() - } else { - blocks.first() - } - .and_then(|b| b.header.as_ref()); + let block_header = + if request.direction == Direction::Descending { blocks.last() } else { blocks.first() } + .and_then(|b| b.header.as_ref()); let expected_block = block_header.as_ref().map_or(false, |h| match request.from { - message::FromBlock::Hash(hash) => h.hash() == hash, - message::FromBlock::Number(n) => h.number() == &n, + FromBlock::Hash(hash) => h.hash() == hash, + FromBlock::Number(n) => h.number() == &n, }); if !expected_block { @@ -2519,7 +2455,7 @@ fn validate_blocks( return Err(BadPeer(*who, rep::NOT_REQUESTED)) } - if request.fields.contains(message::BlockAttributes::HEADER) && + if request.fields.contains(BlockAttributes::HEADER) && blocks.iter().any(|b| b.header.is_none()) { trace!( @@ -2531,8 +2467,7 @@ fn validate_blocks( return Err(BadPeer(*who, rep::BAD_RESPONSE)) } - if request.fields.contains(message::BlockAttributes::BODY) && - blocks.iter().any(|b| b.body.is_none()) + if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) { trace!( target: "sync", @@ -2583,13 +2518,10 @@ fn validate_blocks( #[cfg(test)] mod test { - use super::{ - message::{BlockState, FromBlock}, - *, - }; - use crate::message::BlockData; + use super::*; use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; + use sc_network_common::sync::message::{BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ @@ -2682,15 +2614,15 @@ mod test { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2.clone(), Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); + sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); // we wil send block requests to these peers // for these blocks we don't know about assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3.clone(), b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -2700,7 +2632,7 @@ mod test { assert!(sync.justification_requests().any(|(p, r)| { p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && - r.from == message::FromBlock::Hash(b1_hash) && + r.from == FromBlock::Hash(b1_hash) && r.to == None })); @@ -2741,7 +2673,7 @@ mod test { data: Some(Vec::new()), }; - sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_annnounce, true); + sync.push_block_announce_validation(*peer_id, header.hash(), block_annnounce, true); // Poll until we have procssed the block announcement block_on(poll_fn(|cx| loop { @@ -2858,8 +2790,8 @@ mod test { let block3_fork = build_block_at(block2.hash(), false); // Add two peers which are on block 1. 
- sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); - sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); // Tell sync that our best block is 3. sync.update_chain_info(&block3.hash(), 3); @@ -2953,9 +2885,9 @@ mod test { let best_block = blocks.last().unwrap().clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) .unwrap(); - sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); + sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -2981,6 +2913,25 @@ mod test { best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(), + ); + resp_blocks .into_iter() .rev() @@ -3083,7 +3034,7 @@ mod test { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); @@ -3097,17 +3048,18 @@ mod test { let response = create_block_response(vec![block.clone()]); let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = match on_block_data.into_request() { - Some(req) => req.1, + request = if let OnBlockData::Request(_peer, request) = on_block_data { + request + } else { // We found the ancenstor - None => break, + break }; log::trace!(target: "sync", "Request: {:?}", request); } // Now request and import the fork. 
- let mut best_block_num = finalized_block.header().number().clone() as u32; + let mut best_block_num = *finalized_block.header().number() as u32; while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { let request = get_block_request( &mut sync, @@ -3140,9 +3092,9 @@ .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - b.header().number().clone(), + *b.header().number(), Default::default(), - Some(peer_id1.clone()), + Some(peer_id1), )), b.hash(), ) @@ -3165,6 +3117,159 @@ ); } + #[test] + fn syncs_fork_without_duplicate_requests() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 5, + None, + ) + .unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + .unwrap(); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + request = if let OnBlockData::Request(_peer, request) = on_block_data { + request + } else { + // We found the ancestor + break + }; + + log::trace!(target: "sync", "Request: {:?}", request); + } + + // Now request and import the fork.
+ let mut best_block_num = *finalized_block.header().number() as u32; + let mut request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1; + while best_block_num < last_block_num { + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + if best_block_num < last_block_num { + // make sure we're not getting a duplicate request in the time before the blocks are + // processed + request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + } + + let mut notify_imported: Vec<_> = resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(); + + // The import queue may send notifications in batches of varying size. So we simulate + // this here by splitting the batch into 2 notifications. + let second_batch = notify_imported.split_off(notify_imported.len() / 2); + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + notify_imported, + ); + + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + second_batch, + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request( + &mut sync, + FromBlock::Hash(fork_blocks.last().unwrap().hash()), + 1, + &peer_id1, + ); + } + #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); @@ -3183,7 +3288,7 @@ mod test { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); // Create a "new" header and announce it @@ -3215,7 +3320,7 @@ mod test { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) .unwrap(); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; diff --git a/client/network/sync/src/schema.rs b/client/network/sync/src/schema.rs index aa3eb84621d8f..b31005360d023 100644 --- a/client/network/sync/src/schema.rs +++ b/client/network/sync/src/schema.rs @@ -18,6 +18,6 @@ //! Include sources generated from protobuf definitions. 
-pub mod v1 { +pub(crate) mod v1 { include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); } diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 4041c28af0eba..e70d3b6b33a28 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -23,6 +23,7 @@ use codec::{Decode, Encode}; use log::debug; use sc_client_api::{CompactProof, ProofProvider}; use sc_consensus::ImportedState; +use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; @@ -42,15 +43,6 @@ pub struct StateSync { skip_proof: bool, } -/// Reported state download progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct StateDownloadProgress { - /// Estimated download percentage. - pub percentage: u32, - /// Total state size in bytes downloaded so far. - pub size: u64, -} - /// Import state chunk result. pub enum ImportResult { /// State is complete and ready for import. diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 8e0bae14046da..6cf6482a44f8b 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -27,7 +27,7 @@ use libp2p::PeerId; use log::{debug, trace}; use lru::LruCache; use prost::Message; -use sc_client_api::ProofProvider; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -50,10 +50,16 @@ mod rep { pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); } -/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests. +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(40), @@ -61,8 +67,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -/// Generate the state protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the state protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/state/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/state/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy state protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/state/2", protocol_id.as_ref()) } @@ -104,11 +119,12 @@ pub struct StateRequestHandler { impl StateRequestHandler where B: BlockT, - Client: ProofProvider + Send + Sync + 'static, + Client: BlockBackend + ProofProvider + Send + Sync + 'static, { /// Create a new [`StateRequestHandler`]. 
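Both request handlers in this file now derive their primary protocol name from the genesis hash, optionally namespaced by a fork id, and keep the old `ProtocolId`-based name only as a `fallback_names` entry for older peers. A runnable sketch of the naming scheme (it assumes the `hex` crate, which these handlers already use):

```rust
// Mirrors the shape of the state protocol names built above.
fn generate_protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> String {
    if let Some(fork_id) = fork_id {
        format!("/{}/{}/state/2", hex::encode(genesis_hash), fork_id)
    } else {
        format!("/{}/state/2", hex::encode(genesis_hash))
    }
}

fn main() {
    let genesis = [0xde, 0xad, 0xbe, 0xef];
    assert_eq!(generate_protocol_name(&genesis, None), "/deadbeef/state/2");
    assert_eq!(
        generate_protocol_name(&genesis, Some("test-fork-id")),
        "/deadbeef/test-fork-id/state/2",
    );
}
```

A consequence of keying on the genesis hash: two chains that happen to share a `ProtocolId` but have different genesis blocks can no longer answer each other's requests by accident.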
pub fn new( protocol_id: &ProtocolId, + fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -116,7 +132,15 @@ where // number of peers. let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config(protocol_id); + let mut protocol_config = generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); let seen_requests = LruCache::new(num_peer_hint * 2); diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index d3d9d7d244153..f3fad6c1b7fdb 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -18,60 +18,25 @@ //! Warp sync support. -pub use crate::warp_request_handler::{ - EncodedProof, Request as WarpProofRequest, VerificationResult, WarpSyncProvider, -}; use crate::{ schema::v1::{StateRequest, StateResponse}, state::{ImportResult, StateSync}, }; use sc_client_api::ProofProvider; +use sc_network_common::sync::warp::{ + EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, + WarpSyncProvider, +}; use sp_blockchain::HeaderBackend; use sp_finality_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use std::{fmt, sync::Arc}; +use std::sync::Arc; enum Phase { WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, State(StateSync), } -/// Reported warp sync phase. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum WarpSyncPhase { - /// Waiting for peers to connect. - AwaitingPeers, - /// Downloading and verifying grandpa warp proofs. - DownloadingWarpProofs, - /// Downloading state data. - DownloadingState, - /// Importing state. - ImportingState, - /// Downloading block history. - DownloadingBlocks(NumberFor), -} - -impl fmt::Display for WarpSyncPhase { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::AwaitingPeers => write!(f, "Waiting for peers"), - Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), - Self::DownloadingState => write!(f, "Downloading state"), - Self::ImportingState => write!(f, "Importing state"), - Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), - } - } -} - -/// Reported warp sync progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct WarpSyncProgress { - /// Estimated download percentage. - pub phase: WarpSyncPhase, - /// Total bytes downloaded so far. - pub total_bytes: u64, -} - /// Import warp proof result. pub enum WarpProofImportResult { /// Import was successful. diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index 4f66e0a6daf17..394bc68449099 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. +// Copyright 2022 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ //! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. 
-use codec::{Decode, Encode}; +use codec::Decode; use futures::{ channel::{mpsc, oneshot}, stream::StreamExt, @@ -27,57 +27,24 @@ use sc_network_common::{ request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + sync::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}, }; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; -pub use sp_finality_grandpa::{AuthorityList, SetId}; - -/// Scale-encoded warp sync proof response. -pub struct EncodedProof(pub Vec); - -/// Warp sync request -#[derive(Encode, Decode, Debug)] -pub struct Request { - /// Start collecting proofs from this block. - pub begin: B::Hash, -} - const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; -/// Proof verification result. -pub enum VerificationResult { - /// Proof is valid, but the target was not reached. - Partial(SetId, AuthorityList, Block::Hash), - /// Target finality is proved. - Complete(SetId, AuthorityList, Block::Header), -} - -/// Warp sync backend. Handles retrieveing and verifying warp sync proofs. -pub trait WarpSyncProvider: Send + Sync { - /// Generate proof starting at given block hash. The proof is accumulated until maximum proof - /// size is reached. - fn generate( - &self, - start: B::Hash, - ) -> Result>; - /// Verify warp proof against current set of authorities. - fn verify( - &self, - proof: &EncodedProof, - set_id: SetId, - authorities: AuthorityList, - ) -> Result, Box>; - /// Get current list of authorities. This is supposed to be genesis authorities when starting - /// sync. - fn current_authorities(&self) -> AuthorityList; -} - /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing /// incoming requests. -pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { +pub fn generate_request_response_config>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> RequestResponseConfig { RequestResponseConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 32, max_response_size: MAX_RESPONSE_SIZE, request_timeout: Duration::from_secs(10), @@ -85,8 +52,17 @@ } } -/// Generate the grandpa warp sync protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: ProtocolId) -> String { +/// Generate the grandpa warp sync protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/sync/warp", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/sync/warp", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy grandpa warp sync protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: ProtocolId) -> String { format!("/{}/sync/warp", protocol_id.as_ref()) } @@ -98,13 +74,16 @@ pub struct RequestHandler { impl RequestHandler { /// Create a new [`RequestHandler`].
- pub fn new( + pub fn new>( protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, backend: Arc>, ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); - let mut request_response_config = generate_request_response_config(protocol_id); + let mut request_response_config = + generate_request_response_config(protocol_id, genesis_hash, fork_id); request_response_config.inbound_queue = Some(tx); (Self { backend, request_receiver }, request_response_config) @@ -115,7 +94,7 @@ impl RequestHandler { payload: Vec, pending_response: oneshot::Sender, ) -> Result<(), HandleRequestError> { - let request = Request::::decode(&mut &payload[..])?; + let request = WarpProofRequest::::decode(&mut &payload[..])?; let EncodedProof(proof) = self .backend diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 2af1dd8cb9b54..990129229a40f 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -14,18 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-std = "1.11.0" -async-trait = "0.1.50" +async-trait = "0.1.57" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network = { version = "0.10.0-dev", path = "../" } sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-network-light = { version = "0.10.0-dev", path = "../light" } +sc-network-sync = { version = "0.10.0-dev", path = "../sync" } sc-service = { version = "0.10.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 9e752e81a3bbc..fbe56f463d0f3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -25,6 +25,7 @@ mod sync; use std::{ borrow::Cow, collections::HashMap, + marker::PhantomData, pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, @@ -44,20 +45,27 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, + ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, + Verifier, }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - block_request_handler::BlockRequestHandler, config::{ - MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, - ProtocolConfig, Role, SyncMode, TransportConfig, + NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, SyncMode, + TransportConfig, }, - light_client_requests::handler::LightClientRequestHandler, - state_request_handler::StateRequestHandler, - warp_request_handler, Multiaddr, NetworkService, NetworkWorker, + Multiaddr, NetworkService, NetworkWorker, +}; +use sc_network_common::{ + config::{MultiaddrWithPeerId, ProtocolId}, + service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, + 
sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + warp_request_handler, ChainSync, }; -pub use sc_network_common::config::ProtocolId; use sc_service::client::Client; use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, @@ -65,7 +73,7 @@ use sp_blockchain::{ }; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, - BlockOrigin, Error as ConsensusError, + BlockOrigin, Error as ConsensusError, SyncOracle, }; use sp_core::H256; use sp_runtime::{ @@ -237,7 +245,7 @@ where { /// Get this peer ID. pub fn id(&self) -> PeerId { - *self.network.service().local_peer_id() + self.network.service().local_peer_id() } /// Returns true if we're major syncing. @@ -562,25 +570,27 @@ impl BlockImportAdapterFull for T where /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. #[derive(Clone)] -pub struct BlockImportAdapter { +pub struct BlockImportAdapter { inner: I, + _phantom: PhantomData, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. pub fn new(inner: I) -> Self { - Self { inner } + Self { inner, _phantom: PhantomData } } } #[async_trait::async_trait] -impl BlockImport for BlockImportAdapter +impl BlockImport for BlockImportAdapter where I: BlockImport + Send + Sync, I::Transaction: Send, + Transaction: Send + 'static, { type Error = ConsensusError; - type Transaction = (); + type Transaction = Transaction; async fn check_block( &mut self, @@ -591,7 +601,7 @@ where async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, cache: HashMap>, ) -> Result { self.inner.import_block(block.clear_storage_changes_and_mutate(), cache).await @@ -638,27 +648,26 @@ impl VerifierAdapter { struct TestWarpSyncProvider(Arc>); -impl warp_request_handler::WarpSyncProvider for TestWarpSyncProvider { +impl WarpSyncProvider for TestWarpSyncProvider { fn generate( &self, _start: B::Hash, - ) -> Result> { + ) -> Result> { let info = self.0.info(); let best_header = self.0.header(BlockId::hash(info.best_hash)).unwrap().unwrap(); - Ok(warp_request_handler::EncodedProof(best_header.encode())) + Ok(EncodedProof(best_header.encode())) } fn verify( &self, - proof: &warp_request_handler::EncodedProof, - _set_id: warp_request_handler::SetId, - _authorities: warp_request_handler::AuthorityList, - ) -> Result, Box> - { - let warp_request_handler::EncodedProof(encoded) = proof; + proof: &EncodedProof, + _set_id: SetId, + _authorities: AuthorityList, + ) -> Result, Box> { + let EncodedProof(encoded) = proof; let header = B::Header::decode(&mut encoded.as_slice()).unwrap(); - Ok(warp_request_handler::VerificationResult::Complete(0, Default::default(), header)) + Ok(VerificationResult::Complete(0, Default::default(), header)) } - fn current_authorities(&self) -> warp_request_handler::AuthorityList { + fn current_authorities(&self) -> AuthorityList { Default::default() } } @@ -669,7 +678,7 @@ pub struct FullPeerConfig { /// Pruning window size. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: Option, + pub blocks_pruning: Option, /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. 
@@ -688,7 +697,7 @@ pub struct FullPeerConfig { pub storage_chain: bool, } -pub trait TestNetFactory: Sized +pub trait TestNetFactory: Default + Sized where >::Transaction: Send, { @@ -696,14 +705,8 @@ where type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; - /// These two need to be implemented! - fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &Self::PeerData, - ) -> Self::Verifier; + /// This one needs to be implemented! + fn make_verifier(&self, client: PeersClient, peer_data: &Self::PeerData) -> Self::Verifier; /// Get reference to peer. fn peer(&mut self, i: usize) -> &mut Peer; @@ -723,15 +726,10 @@ where Self::PeerData, ); - fn default_config() -> ProtocolConfig { - ProtocolConfig::default() - } - /// Create new test network with this many peers. fn new(n: usize) -> Self { trace!(target: "test_network", "Creating test network"); - let config = Self::default_config(); - let mut net = Self::from_config(&config); + let mut net = Self::default(); for i in 0..n { trace!(target: "test_network", "Adding peer {}", i); @@ -746,10 +744,10 @@ where /// Add a full peer. fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), + let mut test_client_builder = match (config.blocks_pruning, config.storage_chain) { + (Some(blocks_pruning), true) => TestClientBuilder::with_tx_storage(blocks_pruning), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), + (Some(blocks_pruning), false) => TestClientBuilder::with_pruning_window(blocks_pruning), (None, false) => TestClientBuilder::with_default_backend(), }; if let Some(storage) = config.extra_storage { @@ -767,11 +765,8 @@ where let (block_import, justification_import, data) = self .make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); - let verifier = self.make_verifier( - PeersClient { client: client.clone(), backend: backend.clone() }, - &Default::default(), - &data, - ); + let verifier = self + .make_verifier(PeersClient { client: client.clone(), backend: backend.clone() }, &data); let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( @@ -804,7 +799,7 @@ where let addrs = connect_to .iter() .map(|v| { - let peer_id = *self.peer(*v).network_service().local_peer_id(); + let peer_id = self.peer(*v).network_service().local_peer_id(); let multiaddr = self.peer(*v).listen_addr.clone(); MultiaddrWithPeerId { peer_id, multiaddr } }) @@ -815,23 +810,25 @@ where let protocol_id = ProtocolId::from("test-protocol-name"); + let fork_id = Some(String::from("test-fork-id")); + let block_request_protocol_config = { let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, client.clone(), 50); + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, client.clone(), 50); + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { let (handler, protocol_config) = - 
LightClientRequestHandler::new(&protocol_id, client.clone()); + LightClientRequestHandler::new(&protocol_id, None, client.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -839,12 +836,39 @@ where let warp_sync = Arc::new(TestWarpSyncProvider(client.clone())); let warp_protocol_config = { - let (handler, protocol_config) = - warp_request_handler::RequestHandler::new(protocol_id.clone(), warp_sync.clone()); + let (handler, protocol_config) = warp_request_handler::RequestHandler::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + None, + warp_sync.clone(), + ); self.spawn_task(handler.run().boxed()); protocol_config }; + let block_announce_validator = config + .block_announce_validator + .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); + let chain_sync = ChainSync::new( + match network_config.sync_mode { + SyncMode::Full => sc_network_common::sync::SyncMode::Full, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { + skip_proofs, + storage_chain_mode, + }, + SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + block_announce_validator, + network_config.max_parallel_downloads, + Some(warp_sync), + ) + .unwrap(); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -855,15 +879,14 @@ where chain: client.clone(), transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, + fork_id, import_queue, - block_announce_validator: config - .block_announce_validator - .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), + chain_sync: Box::new(chain_sync), metrics_registry: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - warp_sync: Some((warp_sync, warp_protocol_config)), + warp_sync_protocol_config: Some(warp_protocol_config), }) .unwrap(); @@ -872,7 +895,7 @@ where self.mut_peers(move |peers| { for peer in peers.iter_mut() { peer.network - .add_known_address(*network.service().local_peer_id(), listen_addr.clone()); + .add_known_address(network.service().local_peer_id(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); @@ -1012,6 +1035,7 @@ where } } +#[derive(Default)] pub struct TestNet { peers: Vec>, } @@ -1021,17 +1045,7 @@ impl TestNetFactory for TestNet { type PeerData = (); type BlockImport = PeersClient; - /// Create new test network with peers and given config. 
- fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - _client: PeersClient, - _config: &ProtocolConfig, - _peer_data: &(), - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _peer_data: &()) -> Self::Verifier { PassThroughVerifier::new(false) } @@ -1081,6 +1095,7 @@ impl JustificationImport for ForceFinalized { } } +#[derive(Default)] pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { @@ -1088,17 +1103,8 @@ impl TestNetFactory for JustificationTestNet { type PeerData = (); type BlockImport = PeersClient; - fn from_config(config: &ProtocolConfig) -> Self { - JustificationTestNet(TestNet::from_config(config)) - } - - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &(), - ) -> Self::Verifier { - self.0.make_verifier(client, config, peer_data) + fn make_verifier(&self, client: PeersClient, peer_data: &()) -> Self::Verifier { + self.0.make_verifier(client, peer_data) } fn peer(&mut self, i: usize) -> &mut Peer { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 84a5c2ca13fa5..c0778767b75af 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -544,7 +544,7 @@ fn syncs_header_only_forks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); + net.add_full_peer_with_config(FullPeerConfig { blocks_pruning: Some(3), ..Default::default() }); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 8da2d4be3adde..ff97f29961155 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -21,14 +21,16 @@ futures-timer = "3.0.2" hex = "0.4" hyper = { version = "0.14.16", features = ["stream", "http2"] } hyper-rustls = { version = "0.23.0", features = ["http2"] } +libp2p = { version = "0.46.1", default-features = false } num_cpus = "1.13" once_cell = "1.8" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" threadpool = "1.7" tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index c80b511c84d17..f40bceb615148 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -22,7 +22,7 @@ use crate::NetworkProvider; use codec::{Decode, Encode}; use futures::Future; pub use http::SharedClient; -use sc_network::{Multiaddr, PeerId}; +use libp2p::{Multiaddr, PeerId}; use sp_core::{ offchain::{ self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, @@ -324,20 +324,93 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; + use libp2p::PeerId; use sc_client_db::offchain::LocalStorage; - use sc_network::{NetworkStateInfo, PeerId}; + use sc_network_common::{ + config::MultiaddrWithPeerId, + service::{NetworkPeers, NetworkStateInfo}, + }; + use sc_peerset::ReputationChange; use 
sp_core::offchain::{DbExternalities, Externalities}; - use std::time::SystemTime; + use std::{borrow::Cow, time::SystemTime}; pub(super) struct TestNetwork(); - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index d54d491b04c43..e215a016872cc 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -35,14 +35,14 @@ #![warn(missing_docs)] -use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, marker::PhantomData, sync::Arc}; use futures::{ future::{ready, Future}, prelude::*, }; use parking_lot::Mutex; -use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; +use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; use sp_runtime::{ @@ -60,27 +60,9 @@ const LOG_TARGET: &str = "offchain-worker"; /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the /// underlying Substrate networking. -pub trait NetworkProvider: NetworkStateInfo { - /// Set the authorized peers. - fn set_authorized_peers(&self, peers: HashSet); +pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {} - /// Set the authorized only flag. 
- fn set_authorized_only(&self, reserved_only: bool); -} - -impl NetworkProvider for NetworkService -where - B: traits::Block + 'static, - H: ExHashT, -{ - fn set_authorized_peers(&self, peers: HashSet) { - NetworkService::set_authorized_peers(self, peers) - } - - fn set_authorized_only(&self, reserved_only: bool) { - NetworkService::set_authorized_only(self, reserved_only) - } -} +impl NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {} /// Options for [`OffchainWorkers`] pub struct OffchainWorkerOptions { @@ -264,13 +246,15 @@ pub async fn notification_future( mod tests { use super::*; use futures::executor::block_on; + use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network::{Multiaddr, PeerId}; + use sc_network_common::config::MultiaddrWithPeerId; + use sc_peerset::ReputationChange; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use std::sync::Arc; + use std::{borrow::Cow, collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, @@ -288,13 +272,81 @@ mod tests { } } - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index f150c728613ba..d3d6e267f1768 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -serde_json = "1.0.79" +serde_json = "1.0.85" wasm-timer = "0.2" sc-utils = { version = "4.0.0-dev", path = "../utils" } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 3425ba2b245df..7c4057154bdb0 100644 --- a/client/rpc-api/Cargo.toml +++ 
b/client/rpc-api/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } @@ -28,4 +28,4 @@ sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sp-version = { version = "5.0.0", path = "../../primitives/version" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index fe74dea256376..43fd3325fa598 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -32,6 +32,9 @@ pub enum Error { /// The re-execution of the specified block failed. #[error("Failed to re-execute the specified block")] BlockExecutionFailed, + /// Failed to extract the proof. + #[error("Failed to extract the proof")] + ProofExtractionFailed, /// The witness compaction failed. #[error("Failed to create to compact the witness")] WitnessCompactionFailed, @@ -54,6 +57,8 @@ impl From for JsonRpseeError { CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), Error::WitnessCompactionFailed => CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), + Error::ProofExtractionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)), Error::UnsafeRpcCalled(e) => e.into(), } .into() diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 54bf21674a8bd..40e208c2eba8d 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -265,7 +265,7 @@ pub trait StateApi { /// [substrate storage][1], [transparent keys in substrate][2], /// [querying substrate storage via rpc][3]. /// - /// [1]: https://docs.substrate.io/v3/advanced/storage#storage-map-keys + /// [1]: https://docs.substrate.io/main-docs/fundamentals/state-transitions-and-storage/ /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index e17e0f518c3e6..7ddb3f813c249 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -76,8 +76,6 @@ pub struct PeerInfo { pub enum NodeRole { /// The node is a full node Full, - /// The node is a light client - LightClient, /// The node is an authority Authority, } @@ -90,10 +88,10 @@ pub struct SyncState { pub starting_block: Number, /// Height of the current best block of the node. pub current_block: Number, - /// Height of the highest block learned from the network. Missing if no block is known yet. - #[serde(default = "Default::default", skip_serializing_if = "Option::is_none")] - pub highest_block: Option, + /// Height of the highest block in the network. 
+ pub highest_block: Number, } + #[cfg(test)] mod tests { use super::*; @@ -131,7 +129,7 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: Some(128u32), + highest_block: 128u32, }) .unwrap(), r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#, @@ -141,10 +139,10 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: None, + highest_block: 50u32, }) .unwrap(), - r#"{"startingBlock":12,"currentBlock":50}"#, + r#"{"startingBlock":12,"currentBlock":50,"highestBlock":50}"#, ); } } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index daaa955839045..ef2e6bec4cdb0 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } log = "0.4.17" -serde_json = "1.0.79" +serde_json = "1.0.85" tokio = { version = "1.17.0", features = ["parking_lot"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 5b2ee4bedb7dd..0d77442323241 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -18,11 +18,27 @@ //! RPC middlware to collect prometheus metrics on RPC calls. -use jsonrpsee::core::middleware::Middleware; +use jsonrpsee::core::middleware::{Headers, HttpMiddleware, MethodKind, Params, WsMiddleware}; use prometheus_endpoint::{ register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; +use std::net::SocketAddr; + +/// Histogram time buckets in microseconds. +const HISTOGRAM_BUCKETS: [f64; 11] = [ + 5.0, + 25.0, + 100.0, + 500.0, + 1_000.0, + 2_500.0, + 10_000.0, + 25_000.0, + 100_000.0, + 1_000_000.0, + 10_000_000.0, +]; /// Metrics for RPC middleware storing information about the number of requests started/completed, /// calls started/completed and their timings. @@ -74,7 +90,8 @@ impl RpcMetrics { HistogramOpts::new( "substrate_rpc_calls_time", "Total time [μs] of processed RPC calls", - ), + ) + .buckets(HISTOGRAM_BUCKETS.to_vec()), &["protocol", "method"], )?, metrics_registry, @@ -134,30 +151,33 @@ impl RpcMiddleware { pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { Self { metrics, transport_label } } -} -impl Middleware for RpcMiddleware { - type Instant = std::time::Instant; - - fn on_connect(&self) { - self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc()); - } - - fn on_request(&self) -> Self::Instant { + /// Called when a new JSON-RPC request comes to the server. + fn on_request(&self) -> std::time::Instant { let now = std::time::Instant::now(); self.metrics.requests_started.with_label_values(&[self.transport_label]).inc(); now } - fn on_call(&self, name: &str) { - log::trace!(target: "rpc_metrics", "[{}] on_call name={}", self.transport_label, name); + /// Called on each JSON-RPC method call, batch requests will trigger `on_call` multiple times. 
+ fn on_call(&self, name: &str, params: Params, kind: MethodKind) { + log::trace!( + target: "rpc_metrics", + "[{}] on_call name={} params={:?} kind={}", + self.transport_label, + name, + params, + kind, + ); self.metrics .calls_started .with_label_values(&[self.transport_label, name]) .inc(); } - fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { + /// Called on each JSON-RPC method completion, batch requests will trigger `on_result` multiple + /// times. + fn on_result(&self, name: &str, success: bool, started_at: std::time::Instant) { let micros = started_at.elapsed().as_micros(); log::debug!( target: "rpc_metrics", @@ -176,17 +196,64 @@ impl Middleware for RpcMiddleware { .with_label_values(&[ self.transport_label, name, - if success { "true" } else { "false" }, + // the label is "is_error", so `success` must be inverted + // (true => "false" and vice versa) to be registered correctly. + if success { "false" } else { "true" }, ]) .inc(); } - fn on_response(&self, started_at: Self::Instant) { + /// Called once the JSON-RPC request is finished and response is sent to the output buffer. + fn on_response(&self, _result: &str, started_at: std::time::Instant) { log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", self.transport_label, started_at); self.metrics.requests_finished.with_label_values(&[self.transport_label]).inc(); } +} + +impl WsMiddleware for RpcMiddleware { + type Instant = std::time::Instant; + + fn on_connect(&self, _remote_addr: SocketAddr, _headers: &Headers) { + self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc()); + } + + fn on_request(&self) -> Self::Instant { + self.on_request() + } + + fn on_call(&self, name: &str, params: Params, kind: MethodKind) { + self.on_call(name, params, kind) + } - fn on_disconnect(&self) { + fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { + self.on_result(name, success, started_at) + } + + fn on_response(&self, _result: &str, started_at: Self::Instant) { + self.on_response(_result, started_at) + } + + fn on_disconnect(&self, _remote_addr: SocketAddr) { self.metrics.ws_sessions_closed.as_ref().map(|counter| counter.inc()); } } + +impl HttpMiddleware for RpcMiddleware { + type Instant = std::time::Instant; + + fn on_request(&self, _remote_addr: SocketAddr, _headers: &Headers) -> Self::Instant { + self.on_request() + } + + fn on_call(&self, name: &str, params: Params, kind: MethodKind) { + self.on_call(name, params, kind) + } + + fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { + self.on_result(name, success, started_at) + } + + fn on_response(&self, _result: &str, started_at: Self::Instant) { + self.on_response(_result, started_at) + } +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index e8c657f1a2949..4131fecaf510e 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.17" -parking_lot = "0.12.0" -serde_json = "1.0.79" +parking_lot = "0.12.1" +serde_json = "1.0.85" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev",
path = "../api" } @@ -46,6 +46,7 @@ assert_matches = "1.3.0" lazy_static = "1.4.0" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } tokio = "1.17.0" diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index b8c4f5d582808..7d0ffdc62e080 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -29,7 +29,8 @@ use codec::{Decode, Encode}; use futures::{FutureExt, TryFutureExt}; use jsonrpsee::{ core::{async_trait, Error as JsonRpseeError, RpcResult}, - PendingSubscription, + types::SubscriptionResult, + SubscriptionSink, }; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{ @@ -176,13 +177,13 @@ where .collect()) } - fn watch_extrinsic(&self, pending: PendingSubscription, xt: Bytes) { + fn watch_extrinsic(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult { let best_block_hash = self.client.info().best_hash; let dxt = match TransactionFor::
<P>
::decode(&mut &xt[..]).map_err(|e| Error::from(e)) { Ok(dxt) => dxt, Err(e) => { - pending.reject(JsonRpseeError::from(e)); - return + let _ = sink.reject(JsonRpseeError::from(e)); + return Ok(()) }, }; @@ -199,19 +200,15 @@ where let stream = match submit.await { Ok(stream) => stream, Err(err) => { - pending.reject(JsonRpseeError::from(err)); + let _ = sink.reject(JsonRpseeError::from(err)); return }, }; - let mut sink = match pending.accept() { - Some(sink) => sink, - _ => return, - }; - sink.pipe_from_stream(stream).await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(()) } } diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index c00c6e5875d94..375e724a33d69 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -26,7 +26,7 @@ use futures::{ future::{self, FutureExt}, stream::{self, Stream, StreamExt}, }; -use jsonrpsee::PendingSubscription; +use jsonrpsee::SubscriptionSink; use sc_client_api::{BlockBackend, BlockchainEvents}; use sp_blockchain::HeaderBackend; use sp_runtime::{ @@ -69,7 +69,7 @@ where self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err) } - fn subscribe_all_heads(&self, sink: PendingSubscription) { + fn subscribe_all_heads(&self, sink: SubscriptionSink) { subscribe_headers( &self.client, &self.executor, @@ -83,7 +83,7 @@ where ) } - fn subscribe_new_heads(&self, sink: PendingSubscription) { + fn subscribe_new_heads(&self, sink: SubscriptionSink) { subscribe_headers( &self.client, &self.executor, @@ -98,7 +98,7 @@ where ) } - fn subscribe_finalized_heads(&self, sink: PendingSubscription) { + fn subscribe_finalized_heads(&self, sink: SubscriptionSink) { subscribe_headers( &self.client, &self.executor, @@ -117,7 +117,7 @@ where fn subscribe_headers( client: &Arc, executor: &SubscriptionTaskExecutor, - pending: PendingSubscription, + mut sink: SubscriptionSink, best_block_hash: G, stream: F, ) where @@ -143,9 +143,7 @@ fn subscribe_headers( let stream = stream::iter(maybe_header).chain(stream()); let fut = async move { - if let Some(mut sink) = pending.accept() { - sink.pipe_from_stream(stream).await; - } + sink.pipe_from_stream(stream).await; }; executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index a300c80271fd1..be06b91ca747f 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -27,7 +27,7 @@ use std::sync::Arc; use crate::SubscriptionTaskExecutor; -use jsonrpsee::{core::RpcResult, PendingSubscription}; +use jsonrpsee::{core::RpcResult, types::SubscriptionResult, SubscriptionSink}; use sc_client_api::BlockchainEvents; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ @@ -95,13 +95,13 @@ where } /// All new head subscription - fn subscribe_all_heads(&self, sink: PendingSubscription); + fn subscribe_all_heads(&self, sink: SubscriptionSink); /// New best head subscription - fn subscribe_new_heads(&self, sink: PendingSubscription); + fn subscribe_new_heads(&self, sink: SubscriptionSink); /// Finalized head subscription - fn subscribe_finalized_heads(&self, sink: PendingSubscription); + fn subscribe_finalized_heads(&self, sink: SubscriptionSink); } /// Create new state API that works on full node. 
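The subscription plumbing above follows the new jsonrpsee 0.15 shape: methods receive a live `SubscriptionSink` instead of a `PendingSubscription`, return `SubscriptionResult`, reject via `sink.reject(..)`, and forward data with `pipe_from_stream`. A minimal sketch under those API assumptions (hypothetical `make_stream`, and spawned on a plain tokio runtime rather than the task executor used above):

    use futures::stream::{self, Stream};
    use jsonrpsee::{core::Error as JsonRpseeError, types::SubscriptionResult, SubscriptionSink};

    // Hypothetical source of items to publish; errors with a message.
    fn make_stream() -> Result<impl Stream<Item = u32> + Send + Unpin, String> {
        Ok(stream::iter([1u32, 2, 3]))
    }

    fn subscribe_numbers(mut sink: SubscriptionSink) -> SubscriptionResult {
        let stream = match make_stream() {
            Ok(stream) => stream,
            Err(e) => {
                // Answers the pending request with an error; no separate `accept()` step.
                let _ = sink.reject(JsonRpseeError::Custom(e));
                return Ok(())
            },
        };
        // Forward stream items to the subscriber until either side terminates.
        tokio::spawn(async move { sink.pipe_from_stream(stream).await });
        Ok(())
    }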
@@ -160,16 +160,19 @@ where self.backend.finalized_head().map_err(Into::into) } - fn subscribe_all_heads(&self, sink: PendingSubscription) { - self.backend.subscribe_all_heads(sink) + fn subscribe_all_heads(&self, sink: SubscriptionSink) -> SubscriptionResult { + self.backend.subscribe_all_heads(sink); + Ok(()) } - fn subscribe_new_heads(&self, sink: PendingSubscription) { - self.backend.subscribe_new_heads(sink) + fn subscribe_new_heads(&self, sink: SubscriptionSink) -> SubscriptionResult { + self.backend.subscribe_new_heads(sink); + Ok(()) } - fn subscribe_finalized_heads(&self, sink: PendingSubscription) { - self.backend.subscribe_finalized_heads(sink) + fn subscribe_finalized_heads(&self, sink: SubscriptionSink) -> SubscriptionResult { + self.backend.subscribe_finalized_heads(sink); + Ok(()) } } diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index f09da200ff587..7d12458511cfd 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -40,7 +40,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root.clone(), + state_root: res.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -54,7 +54,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root.clone(), + state_root: res.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -93,7 +93,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root.clone(), + state_root: res.block.header.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -110,7 +110,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root.clone(), + state_root: res.block.header.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), diff --git a/client/rpc/src/dev/tests.rs b/client/rpc/src/dev/tests.rs index b7a0de8f5ae0b..f3b18690d0972 100644 --- a/client/rpc/src/dev/tests.rs +++ b/client/rpc/src/dev/tests.rs @@ -17,8 +17,6 @@ // along with this program. If not, see . 
use super::*; -use assert_matches::assert_matches; -use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError}; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; @@ -61,9 +59,18 @@ async fn deny_unsafe_works() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; client.import(BlockOrigin::Own, block).await.unwrap(); - assert_matches!( - api.call::<_, Option>("dev_getBlockStats", [client.info().best_hash]) - .await, - Err(JsonRpseeError::Call(CallError::Custom(err))) if err.message().contains("RPC call is unsafe to be called externally") + let best_hash = client.info().best_hash; + let best_hash_param = + serde_json::to_string(&best_hash).expect("To string must always succeed for block hashes"); + + let request = format!( + "{{\"jsonrpc\":\"2.0\",\"method\":\"dev_getBlockStats\",\"params\":[{}],\"id\":1}}", + best_hash_param + ); + let (resp, _) = api.raw_json_request(&request).await.expect("Raw calls should succeed"); + + assert_eq!( + resp.result, + r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"RPC call is unsafe to be called externally"},"id":1}"# ); } diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index b66b78274a64e..6896b82619166 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -57,7 +57,7 @@ impl OffchainApiServer for Offchain { StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; - self.storage.write().set(prefix, &*key, &*value); + self.storage.write().set(prefix, &key, &value); Ok(()) } @@ -69,6 +69,6 @@ impl OffchainApiServer for Offchain { StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; - Ok(self.storage.read().get(prefix, &*key).map(Into::into)) + Ok(self.storage.read().get(prefix, &key).map(Into::into)) } } diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 232be4edc8aab..7213e4360ae2b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -29,7 +29,8 @@ use crate::SubscriptionTaskExecutor; use jsonrpsee::{ core::{Error as JsonRpseeError, RpcResult}, - ws_server::PendingSubscription, + types::SubscriptionResult, + ws_server::SubscriptionSink, }; use sc_rpc_api::{state::ReadProof, DenyUnsafe}; @@ -155,10 +156,10 @@ where ) -> Result; /// New runtime version subscription - fn subscribe_runtime_version(&self, sink: PendingSubscription); + fn subscribe_runtime_version(&self, sink: SubscriptionSink); /// New storage subscription - fn subscribe_storage(&self, sink: PendingSubscription, keys: Option>); + fn subscribe_storage(&self, sink: SubscriptionSink, keys: Option>); } /// Create new state API that works on full node. 
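The rewritten `deny_unsafe_works` test above drives the server through `raw_json_request` and compares raw response text. For reference, a tiny self-contained helper showing the envelope the test's `format!` call builds (helper name hypothetical):

    // Hypothetical helper: build the JSON-RPC 2.0 request envelope used above.
    fn raw_request(method: &str, param_json: &str) -> String {
        format!(
            "{{\"jsonrpc\":\"2.0\",\"method\":\"{}\",\"params\":[{}],\"id\":1}}",
            method, param_json
        )
    }

    fn main() {
        assert_eq!(
            raw_request("dev_getBlockStats", "\"0x0123\""),
            r#"{"jsonrpc":"2.0","method":"dev_getBlockStats","params":["0x0123"],"id":1}"#
        );
    }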
@@ -318,19 +319,25 @@ where .map_err(Into::into) } - fn subscribe_runtime_version(&self, sink: PendingSubscription) { - self.backend.subscribe_runtime_version(sink) + fn subscribe_runtime_version(&self, sink: SubscriptionSink) -> SubscriptionResult { + self.backend.subscribe_runtime_version(sink); + Ok(()) } - fn subscribe_storage(&self, sink: PendingSubscription, keys: Option>) { + fn subscribe_storage( + &self, + mut sink: SubscriptionSink, + keys: Option>, + ) -> SubscriptionResult { if keys.is_none() { if let Err(err) = self.deny_unsafe.check_if_safe() { let _ = sink.reject(JsonRpseeError::from(err)); - return + return Ok(()) } } - self.backend.subscribe_storage(sink, keys) + self.backend.subscribe_storage(sink, keys); + Ok(()) } } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index c58638c870ab3..42ba70b0af7e7 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -28,7 +28,7 @@ use super::{ use crate::SubscriptionTaskExecutor; use futures::{future, stream, FutureExt, StreamExt}; -use jsonrpsee::{core::Error as JsonRpseeError, PendingSubscription}; +use jsonrpsee::{core::Error as JsonRpseeError, SubscriptionSink}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, StorageProvider, @@ -199,7 +199,7 @@ where .call( &BlockId::Hash(block), &method, - &*call_data, + &call_data, self.client.execution_extensions().strategies().other, None, ) @@ -357,7 +357,7 @@ where .map_err(client_err) } - fn subscribe_runtime_version(&self, pending: PendingSubscription) { + fn subscribe_runtime_version(&self, mut sink: SubscriptionSink) { let client = self.client.clone(); let initial = match self @@ -369,7 +369,7 @@ where { Ok(initial) => initial, Err(e) => { - pending.reject(JsonRpseeError::from(e)); + let _ = sink.reject(JsonRpseeError::from(e)); return }, }; @@ -397,19 +397,17 @@ where let stream = futures::stream::once(future::ready(initial)).chain(version_stream); let fut = async move { - if let Some(mut sink) = pending.accept() { - sink.pipe_from_stream(stream).await; - } + sink.pipe_from_stream(stream).await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } - fn subscribe_storage(&self, pending: PendingSubscription, keys: Option>) { + fn subscribe_storage(&self, mut sink: SubscriptionSink, keys: Option>) { let stream = match self.client.storage_changes_notification_stream(keys.as_deref(), None) { Ok(stream) => stream, Err(blockchain_err) => { - pending.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err)))); + let _ = sink.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err)))); return }, }; @@ -442,9 +440,7 @@ where .filter(|storage| future::ready(!storage.changes.is_empty())); let fut = async move { - if let Some(mut sink) = pending.accept() { - sink.pipe_from_stream(stream).await; - } + sink.pipe_from_stream(stream).await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 77acdf8418ccc..2f91648008ff7 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -99,7 +99,7 @@ fn api>>(sync: T) -> RpcModule> { ); }, Request::NetworkAddReservedPeer(peer, sender) => { - let _ = match sc_network::config::parse_str_addr(&peer) { + let _ = match sc_network_common::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), Err(s) => 
sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), @@ -123,7 +123,7 @@ fn api>>(sync: T) -> RpcModule> { let _ = sender.send(SyncState { starting_block: 1, current_block: 2, - highest_block: Some(3), + highest_block: 3, }); }, }; @@ -297,10 +297,7 @@ async fn system_node_roles() { async fn system_sync_state() { let sync_state: SyncState = api(None).call("system_syncState", EmptyParams::new()).await.unwrap(); - assert_eq!( - sync_state, - SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } - ); + assert_eq!(sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: 3 }); } #[tokio::test] diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index ff11dd1344d01..0acdbb1b1b63e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -22,18 +22,18 @@ wasmtime = ["sc-executor/wasmtime"] test-helpers = [] [dependencies] -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } thiserror = "1.0.30" futures = "0.3.21" rand = "0.7.3" -parking_lot = "0.12.0" +parking_lot = "0.12.1" log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.10" hash-db = "0.15.2" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-trie = { version = "6.0.0", path = "../../primitives/trie" } @@ -50,8 +50,10 @@ sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/comm sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "6.0.0", path = "../../primitives/storage" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-network-light = { version = "0.10.0-dev", path = "../network/light" } +sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } @@ -78,7 +80,7 @@ tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.11.0", default-features = false, features = [ "primitive-types", ] } -async-trait = "0.1.50" +async-trait = "0.1.57" tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" directories = "4.0.1" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5319bf24d5e72..de04af259600b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,13 +37,15 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{ - block_request_handler::{self, BlockRequestHandler}, - config::{Role, SyncMode}, - light_client_requests::{self, handler::LightClientRequestHandler}, - state_request_handler::{self, StateRequestHandler}, - warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, - NetworkService, +use sc_network::{config::SyncMode, NetworkService}; +use sc_network_common::{ + service::{NetworkStateInfo, NetworkStatusProvider, 
NetworkTransaction}, + sync::warp::WarpSyncProvider, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, }; use sc_rpc::{ author::AuthorApiServer, @@ -204,11 +206,10 @@ where let (client, backend) = { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + trie_cache_maximum_size: config.trie_cache_maximum_size, state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, }; let backend = new_db_backend(db_config)?; @@ -320,6 +321,31 @@ where ) } +/// Shared network instance implementing a set of mandatory traits. +pub trait SpawnTaskNetwork: + sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static +{ +} + +impl SpawnTaskNetwork for T +where + Block: BlockT, + T: sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static, +{ +} + /// Parameters to pass into `build`. pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. @@ -338,7 +364,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub rpc_builder: Box Result, Error>>, /// A shared network instance. - pub network: Arc::Hash>>, + pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, /// Telemetry instance for this node. 
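The `DatabaseSettings` change above replaces the `state_cache_size`/`state_cache_child_ratio` pair with a single optional trie-cache byte budget and renames `keep_blocks` to `blocks_pruning`. A sketch of constructing the new shape directly (values hypothetical; field names and variants as used elsewhere in this diff):

    // Hypothetical construction of the renamed settings.
    fn example_db_settings(path: std::path::PathBuf) -> sc_client_db::DatabaseSettings {
        sc_client_db::DatabaseSettings {
            // Byte budget for the trie cache; `None` disables the cache.
            trie_cache_maximum_size: Some(64 * 1024 * 1024),
            state_pruning: Some(sc_client_db::PruningMode::ArchiveAll),
            blocks_pruning: sc_client_db::BlocksPruning::All,
            source: sc_client_db::DatabaseSource::RocksDb { path, cache_size: 1024 },
        }
    }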
@@ -350,7 +376,7 @@ pub fn build_offchain_workers( config: &Configuration, spawn_handle: SpawnTaskHandle, client: Arc, - network: Arc::Hash>>, + network: Arc, ) -> Option>> where TBl: BlockT, @@ -517,13 +543,14 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( - transaction_pool: Arc, - network: Arc::Hash>>, +async fn transaction_notifications( + transaction_pool: Arc, + network: Network, telemetry: Option, ) where - TBl: BlockT, - TExPool: MaintainedTransactionPool::Hash>, + Block: BlockT, + ExPool: MaintainedTransactionPool::Hash>, + Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool @@ -543,13 +570,18 @@ async fn transaction_notifications( .await; } -fn init_telemetry>( +fn init_telemetry( config: &mut Configuration, - network: Arc::Hash>>, - client: Arc, + network: Network, + client: Arc, telemetry: &mut Telemetry, sysinfo: Option, -) -> sc_telemetry::Result { +) -> sc_telemetry::Result +where + Block: BlockT, + Client: BlockBackend, + Network: NetworkStateInfo, +{ let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { name: config.network.node_name.to_owned(), @@ -727,11 +759,8 @@ where } } - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !matches!(config.role, Role::Light), - pool: transaction_pool, - client: client.clone(), - }); + let transaction_pool_adapter = + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); let protocol_id = config.protocol_id(); @@ -742,65 +771,71 @@ where }; let block_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - block_request_handler::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = BlockRequestHandler::new( - &protocol_id, - client.clone(), - config.network.default_peers_set.in_peers as usize + - config.network.default_peers_set.out_peers as usize, - ); - spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + config.chain_spec.fork_id(), + client.clone(), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + ); + spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); + protocol_config }; let state_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - state_request_handler::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = StateRequestHandler::new( - &protocol_id, - client.clone(), - config.network.default_peers_set_num_full as usize, - ); - spawn_handle.spawn("state-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. 
+ let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + config.chain_spec.fork_id(), + client.clone(), + config.network.default_peers_set_num_full as usize, + ); + spawn_handle.spawn("state-request-handler", Some("networking"), handler.run()); + protocol_config }; - let warp_sync_params = warp_sync.map(|provider| { - let protocol_config = if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - warp_request_handler::generate_request_response_config(protocol_id.clone()) - } else { + let (warp_sync_provider, warp_sync_protocol_config) = warp_sync + .map(|provider| { // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + let (handler, protocol_config) = WarpSyncRequestHandler::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + provider.clone(), + ); spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); - protocol_config - }; - (provider, protocol_config) - }); + (Some(provider), Some(protocol_config)) + }) + .unwrap_or_default(); let light_client_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - light_client_requests::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); - spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + config.chain_spec.fork_id(), + client.clone(), + ); + spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); + protocol_config }; + let chain_sync = ChainSync::new( + match config.network.sync_mode { + SyncMode::Full => sc_network_common::sync::SyncMode::Full, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + block_announce_validator, + config.network.max_parallel_downloads, + warp_sync_provider, + )?; let network_params = sc_network::config::Params { role: config.role.clone(), executor: { @@ -818,13 +853,14 @@ where network_config: config.network.clone(), chain: client.clone(), transaction_pool: transaction_pool_adapter as _, - import_queue: Box::new(import_queue), protocol_id, - block_announce_validator, + fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), + import_queue: Box::new(import_queue), + chain_sync: Box::new(chain_sync), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, - warp_sync: warp_sync_params, + warp_sync_protocol_config, light_client_request_protocol_config, }; diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index 9a3ce6024ed92..3ee4399d063b3 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -40,6 +40,13 @@ where info!("There aren't any non-finalized blocks to revert."); } else { info!("Reverted {} blocks. 
Best: #{} ({})", reverted.0, info.best_number, info.best_hash); + + if reverted.0 > blocks { + info!( + "Number of reverted blocks is higher than requested \ + because of reverted leaves higher than the best block." + ) + } } Ok(()) } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 1e8114df13339..de851ac848919 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -28,7 +28,7 @@ use sp_core::{ use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ - self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + backend::AsTrieBackend, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, }; use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; @@ -224,15 +224,11 @@ where match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; - - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - ); + let trie_state = state.as_trie_backend(); + + let backend = sp_state_machine::TrieBackendBuilder::wrap(&trie_state) + .with_recorder(recorder.clone()) + .build(); let mut state_machine = StateMachine::new( &backend, @@ -294,10 +290,7 @@ where ) -> sp_blockchain::Result<(Vec, StorageProof)> { let state = self.backend.state_at(*at)?; - let trie_backend = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; + let trie_backend = state.as_trie_backend(); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index fb73b4c34c040..9d6a2e0457a84 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1327,7 +1327,7 @@ where Some(&root), ) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; - let proving_backend = sp_state_machine::TrieBackend::new(db, root); + let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); let state = read_range_proof_check_with_child_on_proving_backend::>( &proving_backend, start_key, @@ -1614,11 +1614,11 @@ where RA: Send + Sync, { fn header(&self, id: BlockId) -> sp_blockchain::Result> { - (**self).backend.blockchain().header(id) + self.backend.blockchain().header(id) } fn info(&self) -> blockchain::Info { - (**self).backend.blockchain().info() + self.backend.blockchain().info() } fn status(&self, id: BlockId) -> sp_blockchain::Result { @@ -1689,6 +1689,10 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } + + fn state_at(&self, at: &BlockId) -> Result { + self.state_at(at).map_err(Into::into) + } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0eeb6e05cee16..2fb3f820ebf15 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -19,19 +19,18 @@ //! Service configuration. 
pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; -pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode}; +pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ config::{ - MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, - SetConfig, TransportConfig, + NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, SetConfig, TransportConfig, }, Multiaddr, }; pub use sc_network_common::{ - config::ProtocolId, + config::{MultiaddrWithPeerId, ProtocolId}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -70,16 +69,16 @@ pub struct Configuration { pub keystore_remote: Option, /// Configuration for the database. pub database: DatabaseSource, - /// Size of internal state cache in Bytes - pub state_cache_size: usize, - /// Size in percent of cache size dedicated to child tries - pub state_cache_child_ratio: Option, + /// Maximum size of internal trie cache in bytes. + /// + /// If `None` is given the cache is disabled. + pub trie_cache_maximum_size: Option, /// State pruning settings. pub state_pruning: Option, /// Number of blocks to keep in the db. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 24ba670cfcd65..19358c1e5bc4c 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,9 +42,11 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; +use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; +use sp_consensus::SyncOracle; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, @@ -60,7 +62,7 @@ pub use self::{ error::Error, }; pub use config::{ - BasePath, Configuration, DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods, TaskType, + BasePath, BlocksPruning, Configuration, DatabaseSource, PruningMode, Role, RpcMethods, TaskType, }; pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, @@ -101,7 +103,10 @@ impl RpcHandlers { &self, json_query: &str, ) -> Result<(String, mpsc::UnboundedReceiver), JsonRpseeError> { - self.0.raw_json_request(json_query).await + self.0 + .raw_json_request(json_query) + .await + .map(|(method_res, recv)| (method_res.result, recv)) } /// Provides access to the underlying `RpcModule` @@ -225,8 +230,15 @@ async fn build_network_future< } } sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { - let x = network.add_reserved_peer(peer_addr) - .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let result = match MultiaddrWithPeerId::try_from(peer_addr) { + Ok(peer) => { + network.add_reserved_peer(peer) + }, + Err(err) => { + Err(err.to_string()) + }, + }; + let x = result.map_err(sc_rpc::system::error::Error::MalformattedPeerArg); let _ = sender.send(x); } 
sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { @@ -253,7 +265,6 @@ async fn build_network_future< let node_role = match role { Role::Authority { .. } => NodeRole::Authority, - Role::Light => NodeRole::LightClient, Role::Full => NodeRole::Full, }; @@ -262,10 +273,12 @@ async fn build_network_future< sc_rpc::system::Request::SyncState(sender) => { use sc_rpc::system::SyncState; + let best_number = client.info().best_number; + let _ = sender.send(SyncState { starting_block, - current_block: client.info().best_number, - highest_block: network.best_seen_block(), + current_block: best_number, + highest_block: network.best_seen_block().unwrap_or(best_number), }); } } @@ -370,14 +383,14 @@ where match tokio::task::block_in_place(|| { config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) }) { - Ok((http, ws)) => Ok(Box::new((http, ws))), + Ok((http, ws)) => + Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))), Err(e) => Err(Error::Application(e)), } } /// Transaction pool adapter. pub struct TransactionPoolAdapter { - imports_external_transactions: bool, pool: Arc
<P>
, client: Arc, } @@ -425,11 +438,6 @@ where } fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture { - if !self.imports_external_transactions { - debug!("Transaction rejected"); - return Box::pin(futures::future::ready(TransactionImport::None)) - } - let encoded = transaction.encode(); let uxt = match Decode::decode(&mut &encoded[..]) { Ok(uxt) => uxt, diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 555023f894488..13b249a7b9563 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -22,7 +22,8 @@ use crate::config::Configuration; use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::{config::Role, NetworkService, NetworkStatus}; +use sc_network::config::Role; +use sc_network_common::service::{NetworkStatus, NetworkStatusProvider}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; @@ -160,7 +161,7 @@ impl MetricsService { ) -> Result { let role_bits = match config.role { Role::Full => 1u64, - Role::Light => 2u64, + // 2u64 used to represent light client role Role::Authority { .. } => 4u64, }; @@ -182,15 +183,16 @@ impl MetricsService { /// Returns a never-ending `Future` that performs the /// metric and telemetry updates with information from /// the given sources. - pub async fn run( + pub async fn run( mut self, client: Arc, transactions: Arc, - network: Arc::Hash>>, + network: TNet, ) where TBl: Block, TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, + TNet: NetworkStatusProvider, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); @@ -298,9 +300,10 @@ impl MetricsService { UniqueSaturatedInto::::unique_saturated_into(num) }); - if let Some(best_seen_block) = best_seen_block { - metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); - } + metrics + .block_height + .with_label_values(&["sync_target"]) + .set(best_seen_block.unwrap_or(best_number)); } } } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index d003db57eb7ac..92df5381c202b 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -18,7 +18,7 @@ hex = "0.4" hex-literal = "0.3.4" log = "0.4.17" parity-scale-codec = "3.0.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" tempfile = "3.1.0" tokio = { version = "1.17.0", features = ["time"] } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } @@ -27,6 +27,7 @@ sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../.. 
sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } +sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 136efad088fae..f02b1321d2922 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -23,7 +23,7 @@ use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, }; -use sc_client_db::{Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode}; +use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; @@ -410,7 +410,7 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)).unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash, None)).unwrap(), ); } @@ -1197,10 +1197,9 @@ fn doesnt_import_blocks_that_revert_finality() { let backend = Arc::new( Backend::new( DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1333,9 +1332,9 @@ fn respects_block_rules() { .block; let params = BlockCheckParams { - hash: block_ok.hash().clone(), + hash: block_ok.hash(), number: 0, - parent_hash: block_ok.header().parent_hash().clone(), + parent_hash: *block_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1349,9 +1348,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), + hash: block_not_ok.hash(), number: 0, - parent_hash: block_not_ok.header().parent_hash().clone(), + parent_hash: *block_not_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1372,15 +1371,15 @@ fn respects_block_rules() { let block_ok = block_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_ok.hash().clone(), + hash: block_ok.hash(), number: 1, - parent_hash: block_ok.header().parent_hash().clone(), + parent_hash: *block_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, }; if record_only { - fork_rules.push((1, block_ok.hash().clone())); + fork_rules.push((1, block_ok.hash())); } assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); @@ -1391,9 +1390,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), + hash: block_not_ok.hash(), number: 1, - parent_hash: 
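The `.clone()` removals in the test hunks above are mechanical clippy fixes: block hashes are `Copy` and `hash()` already returns by value, while `parent_hash()` returns a reference that only needs a dereference. A contrived before/after with illustrative types:

#[derive(Clone, Copy, PartialEq, Debug)]
struct Hash([u8; 32]);

struct Header { parent_hash: Hash }

impl Header {
    // Returns a reference, as `HeaderT::parent_hash` does.
    fn parent_hash(&self) -> &Hash { &self.parent_hash }
}

fn before_and_after(header: &Header) {
    let before: Hash = header.parent_hash().clone(); // clippy: clone_on_copy
    let after: Hash = *header.parent_hash();         // plain dereference copies
    assert_eq!(before, after);
}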
block_not_ok.header().parent_hash().clone(), + parent_hash: *block_not_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1424,10 +1423,9 @@ fn returns_status_for_pruned_blocks() { let backend = Arc::new( Backend::new( DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - state_pruning: Some(PruningMode::keep_blocks(1)), - keep_blocks: KeepBlocks::All, + trie_cache_maximum_size: Some(1 << 20), + state_pruning: Some(PruningMode::blocks_pruning(1)), + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1457,9 +1455,9 @@ fn returns_status_for_pruned_blocks() { let b1 = b1.build().unwrap().block; let check_block_a1 = BlockCheckParams { - hash: a1.hash().clone(), + hash: a1.hash(), number: 0, - parent_hash: a1.header().parent_hash().clone(), + parent_hash: *a1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1494,9 +1492,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { - hash: a2.hash().clone(), + hash: a2.hash(), number: 1, - parent_hash: a1.header().parent_hash().clone(), + parent_hash: *a1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1528,9 +1526,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { - hash: a3.hash().clone(), + hash: a3.hash(), number: 2, - parent_hash: a2.header().parent_hash().clone(), + parent_hash: *a2.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1563,9 +1561,9 @@ fn returns_status_for_pruned_blocks() { ); let mut check_block_b1 = BlockCheckParams { - hash: b1.hash().clone(), + hash: b1.hash(), number: 0, - parent_hash: b1.header().parent_hash().clone(), + parent_hash: *b1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 749c83c6eeac7..11c1cbaf7afb1 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -24,13 +24,17 @@ use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; use sc_network::{ config::{NetworkConfiguration, TransportConfig}, - multiaddr, Multiaddr, + multiaddr, +}; +use sc_network_common::{ + config::MultiaddrWithPeerId, + service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, - ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, - SpawnTaskHandle, TaskManager, + BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, + RuntimeGenesis, SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_api::BlockId; @@ -48,8 +52,8 @@ const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, F, U, Multiaddr)>, - full_nodes: Vec<(usize, F, U, Multiaddr)>, + authority_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, + full_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, chain_spec: GenericChainSpec, base_port: u16, nodes: usize, @@ -231,10 +235,9 @@ fn node_config< keystore_remote: 
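The test hunks above track a `sc-client-db` configuration change: the `state_cache_size`/`state_cache_child_ratio` pair gives way to a single optional trie-cache budget, and `keep_blocks: KeepBlocks` becomes `blocks_pruning: BlocksPruning`. A sketch of the new shape with simplified stand-in types (the real definitions live in `sc_client_db`):

use std::path::PathBuf;

enum BlocksPruning { All, Some(u32) }
enum PruningMode { ArchiveAll, Constrained(u32) }
enum DatabaseSource { RocksDb { path: PathBuf, cache_size: usize } }

struct DatabaseSettings {
    // Byte budget for the shared trie node cache; replaces the old
    // `state_cache_size` / `state_cache_child_ratio` pair.
    trie_cache_maximum_size: Option<usize>,
    state_pruning: Option<PruningMode>,
    // Replaces `keep_blocks: KeepBlocks`.
    blocks_pruning: BlocksPruning,
    source: DatabaseSource,
}

fn settings_as_in_the_tests(tmp: PathBuf) -> DatabaseSettings {
    DatabaseSettings {
        trie_cache_maximum_size: Some(1 << 20), // 1 MiB, as in the test above
        state_pruning: Some(PruningMode::ArchiveAll),
        blocks_pruning: BlocksPruning::All,
        source: DatabaseSource::RocksDb { path: tmp, cache_size: 1024 },
    }
}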
Default::default(), keystore: KeystoreConfig::Path { path: root.join("key"), password: None }, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 16777216, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), @@ -320,7 +323,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -340,7 +343,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -382,12 +385,12 @@ where for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -414,7 +417,7 @@ where if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { service .network() - .add_reserved_peer(address.to_string()) + .add_reserved_peer(address) .expect("Error adding reserved peer"); address = node_id.clone(); } @@ -422,7 +425,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -479,7 +482,7 @@ pub fn sync( for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } @@ -532,13 +535,13 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } network.run_until_all_full(|_index, service| { @@ -556,7 +559,7 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 08856ce3f48a9..4243968ec79b4 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -17,6 +17,6 @@ codec = { package = "parity-scale-codec", 
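test/src/lib.rs above stops round-tripping reserved-peer addresses through strings: each node now keeps a `MultiaddrWithPeerId` and hands it to `add_reserved_peer` directly. A sketch of why the typed pair is preferable to `addr.to_string()`, with stand-ins for the libp2p types:

#[derive(Clone, Debug)]
struct Multiaddr(String); // stand-in; the real type comes from libp2p
#[derive(Clone, Debug)]
struct PeerId([u8; 32]); // stand-in

// Mirrors `sc_network_common::config::MultiaddrWithPeerId`: an address
// bundled with the identity expected to answer at that address.
#[derive(Clone, Debug)]
struct MultiaddrWithPeerId {
    multiaddr: Multiaddr,
    peer_id: PeerId,
}

struct Network;

impl Network {
    // Before: `add_reserved_peer(&self, peer: String)` forced callers to
    // stringify and the service to re-parse, failing at runtime on bad input.
    // After: the typed pair makes malformed input unrepresentable.
    fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> {
        Ok(())
    }
}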
version = "3.0.0", features = ["derive log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index d5cca9a342187..1c7140777e16e 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -227,7 +227,7 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. - pub fn keep_blocks(n: u32) -> PruningMode { + pub fn blocks_pruning(n: u32) -> PruningMode { PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } @@ -835,34 +835,34 @@ mod tests { #[test] fn pruning_mode_compatibility() { for (created, reopened, expected) in [ - (None, None, Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(256)), Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(128)), Ok(PruningMode::keep_blocks(128))), - (None, Some(PruningMode::keep_blocks(512)), Ok(PruningMode::keep_blocks(512))), + (None, None, Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(256)), Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(128)), Ok(PruningMode::blocks_pruning(128))), + (None, Some(PruningMode::blocks_pruning(512)), Ok(PruningMode::blocks_pruning(512))), (None, Some(PruningMode::ArchiveAll), Err(())), (None, Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::keep_blocks(256)), None, Ok(PruningMode::keep_blocks(256))), + (Some(PruningMode::blocks_pruning(256)), None, Ok(PruningMode::blocks_pruning(256))), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(256)), - Ok(PruningMode::keep_blocks(256)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(256)), + Ok(PruningMode::blocks_pruning(256)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(128)), - Ok(PruningMode::keep_blocks(128)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(128)), + Ok(PruningMode::blocks_pruning(128)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(512)), - Ok(PruningMode::keep_blocks(512)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(512)), + Ok(PruningMode::blocks_pruning(512)), ), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveAll), Err(())), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveAll), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(512)), Err(())), ( Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveAll), @@ -870,9 +870,9 @@ mod 
tests { ), (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(512)), Err(())), (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), ( Some(PruningMode::ArchiveCanonical), diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index bd9194092fa3d..12ffc0c2e8d7a 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -14,9 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml index 0973631a3cc24..1e96f69a92dfe 100644 --- a/client/sysinfo/Cargo.toml +++ b/client/sysinfo/Cargo.toml @@ -21,7 +21,7 @@ rand = "0.7.3" rand_pcg = "0.2.1" regex = "1" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 9682dc824f930..4be7c186720fc 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.19" futures = "0.3.21" -libp2p = { version = "0.45.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" pin-project = "1.0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" wasm-timer = "0.2.5" diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs index e21a2380be255..d64da44a83b6b 100644 --- a/client/telemetry/src/transport.rs +++ b/client/telemetry/src/transport.rs @@ -31,7 +31,8 @@ const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); pub(crate) fn initialize_transport() -> Result { let transport = { - let inner = block_on(libp2p::dns::DnsConfig::system(libp2p::tcp::TcpConfig::new()))?; + let tcp_transport = libp2p::tcp::TcpTransport::new(libp2p::tcp::GenTcpConfig::new()); + let inner = block_on(libp2p::dns::DnsConfig::system(tcp_transport))?; libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { let connec = connec 
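The renamed `pruning_mode_compatibility` cases above encode a single rule: archive databases must be reopened in exactly the mode they were created with, constrained (`blocks_pruning(n)`) databases accept any constrained setting, and an unspecified mode falls back to whatever is on disk (256 blocks for a fresh database). The same rule as a compact function, distilled from the test table rather than copied from state-db:

#[derive(Clone, Copy, PartialEq, Debug)]
enum PruningMode {
    ArchiveAll,
    ArchiveCanonical,
    Constrained(u32),
}

const DEFAULT: PruningMode = PruningMode::Constrained(256);

// `created` is what the database was created with (None for a fresh one),
// `requested` is what the node was started with this time.
fn choose_mode(
    created: Option<PruningMode>,
    requested: Option<PruningMode>,
) -> Result<PruningMode, ()> {
    use PruningMode::*;
    match (created.unwrap_or(DEFAULT), requested) {
        // Nothing requested: stick with the stored mode.
        (stored, None) => Ok(stored),
        // Constrained databases accept any constrained request.
        (Constrained(_), Some(Constrained(n))) => Ok(Constrained(n)),
        // Archive databases only accept the identical mode.
        (stored, Some(req)) if stored == req => Ok(stored),
        _ => Err(()),
    }
}

// Matching rows of the table above, e.g.:
// choose_mode(None, Some(PruningMode::Constrained(128))) == Ok(Constrained(128))
// choose_mode(Some(PruningMode::ArchiveAll), Some(PruningMode::Constrained(256))) == Err(())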
.with(|item| { diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 3f0bbbe922d3f..8bc5d770c9c5a 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -20,13 +20,13 @@ lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } once_cell = "1.8.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" regex = "1.5.5" rustc-hash = "1.1.0" serde = "1.0.136" thiserror = "1.0.30" tracing = "0.1.29" -tracing-log = { version = "0.1.3", features = ["interest-cache"] } +tracing-log = "0.1.3" tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 33c83dd87189e..58941617bfb6a 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -155,10 +155,7 @@ where let max_level_hint = Layer::::max_level_hint(&env_filter); let max_level = to_log_level_filter(max_level_hint); - tracing_log::LogTracer::builder() - .with_max_level(max_level) - .with_interest_cache(tracing_log::InterestCacheConfig::default()) - .init()?; + tracing_log::LogTracer::builder().with_max_level(max_level).init()?; // If we're only logging `INFO` entries then we'll use a simplified logging format. let detailed_output = match max_level_hint { diff --git a/client/tracing/src/logging/stderr_writer.rs b/client/tracing/src/logging/stderr_writer.rs index e62c5e82c1ac7..de78a61af41a2 100644 --- a/client/tracing/src/logging/stderr_writer.rs +++ b/client/tracing/src/logging/stderr_writer.rs @@ -89,7 +89,7 @@ fn flush_logs(mut buffer: parking_lot::lock_api::MutexGuard::Extrinsic, ) -> Self::ValidationFuture { let nonce = uxt.transfer().nonce; - let from = uxt.transfer().from.clone(); + let from = uxt.transfer().from; match self.block_id_to_number(at) { Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), @@ -76,7 +76,7 @@ impl ChainApi for TestApi { ready(Ok(Ok(ValidTransaction { priority: 4, requires: if nonce > 1 && self.nonce_dependant { - vec![to_tag(nonce - 1, from.clone())] + vec![to_tag(nonce - 1, from)] } else { vec![] }, diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 618ba8ccf24d5..19acbddbe7843 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use futures::{channel::mpsc::Receiver, Future}; use sc_transaction_pool_api::error; @@ -108,6 +108,8 @@ pub struct Options { pub future: base::Limit, /// Reject future transactions. pub reject_future_transactions: bool, + /// How long the extrinsic is banned for. 
+ pub ban_time: Duration, } impl Default for Options { @@ -116,6 +118,7 @@ impl Default for Options { ready: base::Limit { count: 8192, total_bytes: 20 * 1024 * 1024 }, future: base::Limit { count: 512, total_bytes: 1 * 1024 * 1024 }, reject_future_transactions: false, + ban_time: Duration::from_secs(60 * 30), } } } @@ -638,7 +641,7 @@ mod tests { .unwrap(); // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); + block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1])).unwrap(); // then assert!(pool.validated_pool.is_banned(&hash1)); @@ -790,12 +793,8 @@ mod tests { assert_eq!(pool.validated_pool().status().future, 0); // when - block_on(pool.prune_tags( - &BlockId::Number(2), - vec![vec![0u8]], - vec![watcher.hash().clone()], - )) - .unwrap(); + block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![*watcher.hash()])) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index b897fe7885033..47e00a1292155 100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -51,6 +51,11 @@ impl Default for PoolRotator { } impl PoolRotator { + /// New rotator instance with specified ban time. + pub fn new(ban_time: Duration) -> Self { + Self { ban_time, banned_until: Default::default() } + } + /// Returns `true` if extrinsic hash is currently banned. pub fn is_banned(&self, hash: &Hash) -> bool { self.banned_until.read().contains_key(hash) @@ -115,7 +120,7 @@ mod tests { let tx = Transaction { data: (), bytes: 1, - hash: hash.clone(), + hash, priority: 5, valid_till: 1, requires: vec![], diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index 084b04842ee90..dcb8195073733 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -24,7 +24,6 @@ use std::{ use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use retain_mut::RetainMut; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; use serde::Serialize; use sp_runtime::{ @@ -125,6 +124,7 @@ impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { let base_pool = base::BasePool::new(options.reject_future_transactions); + let ban_time = options.ban_time; Self { is_validator, options, @@ -132,7 +132,7 @@ impl ValidatedPool { api, pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), - rotator: Default::default(), + rotator: PoolRotator::new(ban_time), } } @@ -203,21 +203,20 @@ impl ValidatedPool { let imported = self.pool.write().import(tx)?; if let base::Imported::Ready { ref hash, .. 
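The three hunks above thread a configurable ban duration from the pool `Options` into the `PoolRotator`, which was previously built via `Default::default()` with a hard-coded ban time. A reduced sketch of the wiring (field names follow the diff; the surrounding types are simplified):

use std::{collections::HashMap, time::{Duration, Instant}};

struct PoolRotator {
    ban_time: Duration,
    banned_until: HashMap<u64, Instant>, // extrinsic hash -> ban expiry
}

impl PoolRotator {
    // New rotator instance with the specified ban time, replacing the
    // old `Default::default()` construction.
    fn new(ban_time: Duration) -> Self {
        Self { ban_time, banned_until: HashMap::new() }
    }

    fn ban(&mut self, hash: u64) {
        self.banned_until.insert(hash, Instant::now() + self.ban_time);
    }
}

struct Options {
    // How long an extrinsic stays banned; defaults to 30 minutes.
    ban_time: Duration,
}

impl Default for Options {
    fn default() -> Self {
        Self { ban_time: Duration::from_secs(60 * 30) }
    }
}

fn build_rotator(options: &Options) -> PoolRotator {
    // `ValidatedPool::new` reads the option before moving `options` into itself.
    PoolRotator::new(options.ban_time)
}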
} = imported { - RetainMut::retain_mut(&mut *self.import_notification_sinks.lock(), |sink| { - match sink.try_send(*hash) { - Ok(()) => true, - Err(e) => - if e.is_full() { - log::warn!( - target: "txpool", - "[{:?}] Trying to notify an import but the channel is full", - hash, - ); - true - } else { - false - }, - } + let sinks = &mut self.import_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send(*hash) { + Ok(()) => true, + Err(e) => + if e.is_full() { + log::warn!( + target: "txpool", + "[{:?}] Trying to notify an import but the channel is full", + hash, + ); + true + } else { + false + }, }); } diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 2243f140cb22d..17c2cfa8a1e06 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -254,7 +254,7 @@ fn should_resubmit_from_retracted_during_maintenance() { let header = api.push_block(1, vec![], true); let fork_header = api.push_block(1, vec![], false); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); + let event = block_event_with_retracted(header, fork_header.hash(), pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 1); @@ -272,7 +272,7 @@ fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacte let header = api.push_block(1, vec![xt.clone()], true); let fork_header = api.push_block(1, vec![xt], false); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); + let event = block_event_with_retracted(header, fork_header.hash(), pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -292,7 +292,7 @@ fn should_not_retain_invalid_hashes_from_retracted() { let fork_header = api.push_block(1, vec![xt.clone()], false); api.add_invalid(&xt); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); + let event = block_event_with_retracted(header, fork_header.hash(), pool.api()); block_on(pool.maintain(event)); assert_eq!( @@ -387,7 +387,7 @@ fn should_push_watchers_during_maintenance() { let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); - let event = ChainEvent::Finalized { hash: header_hash.clone(), tree_route: Arc::from(vec![]) }; + let event = ChainEvent::Finalized { hash: header_hash, tree_route: Arc::from(vec![]) }; block_on(pool.maintain(event)); // then @@ -398,24 +398,24 @@ fn should_push_watchers_during_maintenance() { futures::executor::block_on_stream(watcher0).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); } @@ -533,7 +533,7 @@ fn fork_aware_finalization() { let header = pool.api().push_block(3, vec![from_charlie.clone()], true); canon_watchers.push((watcher, 
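The validated_pool.rs hunk above also drops the `retain_mut` crate: `Vec::retain_mut` has been part of std since Rust 1.61, with the same mutate-then-decide semantics used to prune the import-notification sinks:

fn demo_retain_mut() {
    let mut sinks: Vec<Vec<u8>> = vec![vec![1], vec![], vec![2, 2]];
    // Mutate each element in place, returning `true` to keep it.
    sinks.retain_mut(|sink| {
        sink.push(0);
        sink.len() < 3
    });
    assert_eq!(sinks, vec![vec![1, 0], vec![0]]);
}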
header.hash())); - let event = block_event_with_retracted(header.clone(), d2, &*pool.api()); + let event = block_event_with_retracted(header.clone(), d2, pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -573,7 +573,7 @@ fn fork_aware_finalization() { for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h))); assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); assert_eq!(stream.next(), None); } @@ -581,22 +581,22 @@ fn fork_aware_finalization() { { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); assert_eq!(stream.next(), None); } } @@ -633,7 +633,7 @@ fn prune_and_retract_tx_at_same_time() { let header = pool.api().push_block(2, vec![from_alice.clone()], false); assert_eq!(pool.status().ready, 0); - let event = block_event_with_retracted(header.clone(), b1, &*pool.api()); + let event = block_event_with_retracted(header.clone(), b1, pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -646,9 +646,9 @@ fn prune_and_retract_tx_at_same_time() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2))); assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2))); assert_eq!(stream.next(), None); } @@ -708,7 +708,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D2 { let header = pool.api().push_block(2, vec![], false); - let event = block_event_with_retracted(header, d0, &*pool.api()); + let event = block_event_with_retracted(header, d0, pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); } @@ -781,7 +781,7 @@ fn resubmit_from_retracted_fork() { let e1 = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, 
tx4.clone())) .expect("1. Imported"); - let header = pool.api().push_block_with_parent(d1.clone(), vec![tx4.clone()], true); + let header = pool.api().push_block_with_parent(d1, vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; @@ -790,7 +790,7 @@ fn resubmit_from_retracted_fork() { let f1_header = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) .expect("1. Imported"); - let header = pool.api().push_block_with_parent(e1.clone(), vec![tx5.clone()], true); + let header = pool.api().push_block_with_parent(e1, vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -801,7 +801,7 @@ fn resubmit_from_retracted_fork() { let expected_ready = vec![tx3, tx4, tx5].iter().map(Encode::encode).collect::>(); assert_eq!(expected_ready, ready); - let event = block_event_with_retracted(f1_header, f0, &*pool.api()); + let event = block_event_with_retracted(f1_header, f0, pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 3); diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 2df04be7fb4af..082ac3b55e80d 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.21" futures-timer = "3.0.2" lazy_static = "1.4.0" log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" prometheus = { version = "0.13.0", default-features = false } [features] diff --git a/docs/README.adoc b/docs/README.adoc index 0b82f0ed82a13..5d7b0b52c5250 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -445,7 +445,7 @@ pallet-assets, pallet-balances, pallet-consensus, pallet-contracts, pallet-counc frame-executive, pallet-session, pallet-staking, pallet-timestamp, pallet-treasury * Node [source, shell] -node-cli, node-consensus, node-executor, node-network, node-primitives, node-runtime +node-cli, node-consensus, node-executor, node-network, node-primitives, kitchensink-runtime * Subkey [source, shell] subkey diff --git a/frame/alliance/README.md b/frame/alliance/README.md index f0900c84cbd85..f91475a5984ea 100644 --- a/frame/alliance/README.md +++ b/frame/alliance/README.md @@ -43,6 +43,7 @@ to update the Alliance's rule and make announcements. #### For Members (All) +- `give_retirement_notice` - Give a retirement notice and start a retirement period required to pass in order to retire. - `retire` - Retire from the Alliance and release the caller's deposit. #### For Members (Founders/Fellows) diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 527c35b58a5d8..33cf933aba421 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -332,7 +332,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -417,7 +417,7 @@ benchmarks_instance_pallet! 
{ index, true, )?; - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -489,7 +489,7 @@ benchmarks_instance_pallet! { System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -562,7 +562,7 @@ benchmarks_instance_pallet! { // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -662,7 +662,7 @@ benchmarks_instance_pallet! { assert!(!Alliance::::is_member(&outsider)); assert_eq!(DepositOf::::get(&outsider), None); - let outsider_lookup: ::Source = T::Lookup::unlookup(outsider.clone()); + let outsider_lookup = T::Lookup::unlookup(outsider.clone()); }: _(SystemOrigin::Signed(founder1.clone()), outsider_lookup) verify { assert!(Alliance::::is_member_of(&outsider, MemberRole::Ally)); // outsider is now an ally @@ -681,7 +681,7 @@ benchmarks_instance_pallet! { let ally1 = ally::(1); assert!(Alliance::::is_ally(&ally1)); - let ally1_lookup: ::Source = T::Lookup::unlookup(ally1.clone()); + let ally1_lookup = T::Lookup::unlookup(ally1.clone()); let call = Call::::elevate_ally { ally: ally1_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } @@ -691,12 +691,37 @@ benchmarks_instance_pallet! { assert_last_event::(Event::AllyElevated { ally: ally1 }.into()); } + give_retirement_notice { + set_members::(); + let fellow2 = fellow::(2); + + assert!(Alliance::::is_fellow(&fellow2)); + }: _(SystemOrigin::Signed(fellow2.clone())) + verify { + assert!(Alliance::::is_member_of(&fellow2, MemberRole::Retiring)); + + assert_eq!( + RetiringMembers::::get(&fellow2), + Some(System::::block_number() + T::RetirementPeriod::get()) + ); + assert_last_event::( + Event::MemberRetirementPeriodStarted {member: fellow2}.into() + ); + } + retire { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::is_fellow(&fellow2)); - assert!(!Alliance::::is_up_for_kicking(&fellow2)); + + assert_eq!( + Alliance::::give_retirement_notice( + SystemOrigin::Signed(fellow2.clone()).into() + ), + Ok(()) + ); + System::::set_block_number(System::::block_number() + T::RetirementPeriod::get()); assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); }: _(SystemOrigin::Signed(fellow2.clone())) @@ -713,14 +738,10 @@ benchmarks_instance_pallet! 
{ set_members::(); let fellow2 = fellow::(2); - UpForKicking::::insert(&fellow2, true); - assert!(Alliance::::is_member_of(&fellow2, MemberRole::Fellow)); - assert!(Alliance::::is_up_for_kicking(&fellow2)); - assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); - let fellow2_lookup: ::Source = T::Lookup::unlookup(fellow2.clone()); + let fellow2_lookup = T::Lookup::unlookup(fellow2.clone()); let call = Call::::kick_member { who: fellow2_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index f9e85e270af16..bc0a119cd54a6 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -60,6 +60,8 @@ //! //! #### For Members (All) //! +//! - `give_retirement_notice` - Give a retirement notice and start a retirement period required to +//! pass in order to retire. //! - `retire` - Retire from the Alliance and release the caller's deposit. //! //! #### For Members (Founders/Fellows) @@ -93,13 +95,14 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; mod types; pub mod weights; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use sp_runtime::{ - traits::{StaticLookup, Zero}, + traits::{Saturating, StaticLookup, Zero}, RuntimeDebug, }; use sp_std::{convert::TryInto, prelude::*}; @@ -124,6 +127,9 @@ pub use pallet::*; pub use types::*; pub use weights::*; +/// The log target of this pallet. +pub const LOG_TARGET: &str = "runtime::alliance"; + /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -198,6 +204,7 @@ pub enum MemberRole { Founder, Fellow, Ally, + Retiring, } /// The type of item that may be deemed unscrupulous. @@ -210,12 +217,15 @@ pub enum UnscrupulousItem { type UnscrupulousItemOf = UnscrupulousItem<::AccountId, UrlOf>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] + #[pallet::storage_version(migration::STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); #[pallet::config] @@ -306,12 +316,18 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The number of blocks a member must wait between giving a retirement notice and retiring. + /// Supposed to be greater than time required to `kick_member`. + type RetirementPeriod: Get; } #[pallet::error] pub enum Error { /// The founders/fellows/allies have already been initialized. - MembersAlreadyInitialized, + AllianceAlreadyInitialized, + /// The Alliance has not been initialized yet, therefore accounts cannot join it. + AllianceNotYetInitialized, /// Account is already a member. AlreadyMember, /// Account is not a member. @@ -320,8 +336,6 @@ pub mod pallet { NotAlly, /// Account is not a founder. NotFounder, - /// This member is up for being kicked from the Alliance and cannot perform this operation. - UpForKicking, /// Account does not have voting rights. NoVotingRights, /// Account is already an elevated (fellow) member. @@ -353,6 +367,12 @@ pub mod pallet { TooManyMembers, /// Number of announcements exceeds `MaxAnnouncementsCount`. TooManyAnnouncements, + /// Account already gave retirement notice + AlreadyRetiring, + /// Account did not give a retirement notice required to retire. + RetirementNoticeNotGiven, + /// Retirement period has not passed. 
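Several call sites above (`nominate_ally`, `elevate_ally`, `kick_member`, and the benchmark `unlookup` bindings) shorten `<T::Lookup as StaticLookup>::Source` to the new alias. Written out in full, as defined in the upstream pallet, the alias is:

use frame_system::Config;
use sp_runtime::traits::StaticLookup;

type AccountIdLookupOf<T> = <<T as Config>::Lookup as StaticLookup>::Source;

// Dispatchables then read, e.g.:
//   pub fn kick_member(origin: OriginFor<T>, who: AccountIdLookupOf<T>) -> DispatchResult
// and `let lookup = T::Lookup::unlookup(acc.clone());` no longer needs an
// explicit type annotation in the benchmarks above.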
+ RetirementPeriodNotPassed, } #[pallet::event] @@ -378,6 +398,8 @@ pub mod pallet { }, /// An ally has been elevated to Fellow. AllyElevated { ally: T::AccountId }, + /// A member gave retirement notice and their retirement period started. + MemberRetirementPeriodStarted { member: T::AccountId }, /// A member has retired with its deposit unreserved. MemberRetired { member: T::AccountId, unreserved: Option> }, /// A member has been kicked out with its deposit slashed. @@ -434,6 +456,11 @@ pub mod pallet { Members::::insert(MemberRole::Fellow, members); } if !self.allies.is_empty() { + // Only allow Allies if the Alliance is "initialized". + assert!( + Pallet::::is_initialized(), + "Alliance must have Founders or Fellows to have Allies" + ); let members: BoundedVec = self.allies.clone().try_into().expect("Too many genesis allies"); Members::::insert(MemberRole::Ally, members); @@ -476,12 +503,12 @@ pub mod pallet { ValueQuery, >; - /// A set of members that are (potentially) being kicked out. They cannot retire until the - /// motion is settled. + /// A set of members who gave a retirement notice. They can retire after the end of retirement + /// period stored as a future block number. #[pallet::storage] - #[pallet::getter(fn up_for_kicking)] - pub type UpForKicking, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::AccountId, bool, ValueQuery>; + #[pallet::getter(fn retiring_members)] + pub type RetiringMembers, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::BlockNumber, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. @@ -516,11 +543,6 @@ pub mod pallet { let proposor = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); - if let Some(Call::kick_member { who }) = proposal.is_sub_type() { - let strike = T::Lookup::lookup(who.clone())?; - >::insert(strike, true); - } - T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; Ok(()) } @@ -612,6 +634,11 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; + // Cannot be called if the Alliance already has Founders or Fellows. + // TODO: Remove check and allow Root to set members at any time. + // https://github.com/paritytech/substrate/issues/11928 + ensure!(!Self::is_initialized(), Error::::AllianceAlreadyInitialized); + let mut founders: BoundedVec = founders.try_into().map_err(|_| Error::::TooManyMembers)?; let mut fellows: BoundedVec = @@ -619,12 +646,6 @@ pub mod pallet { let mut allies: BoundedVec = allies.try_into().map_err(|_| Error::::TooManyMembers)?; - ensure!( - !Self::has_member(MemberRole::Founder) && - !Self::has_member(MemberRole::Fellow) && - !Self::has_member(MemberRole::Ally), - Error::::MembersAlreadyInitialized - ); for member in founders.iter().chain(fellows.iter()).chain(allies.iter()) { Self::has_identity(member)?; } @@ -705,6 +726,15 @@ pub mod pallet { pub fn join_alliance(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; + // We don't want anyone to join as an Ally before the Alliance has been initialized via + // Root call. The reasons are two-fold: + // + // 1. There is no `Rule` or admission criteria, so the joiner would be an ally to + // nought, and + // 2. It adds complexity to the initialization, namely deciding to overwrite accounts + // that already joined as an Ally. + ensure!(Self::is_initialized(), Error::::AllianceNotYetInitialized); + // Unscrupulous accounts are non grata. 
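`RetiringMembers` replaces the old `UpForKicking` flag with a deadline: `give_retirement_notice` records the current block plus `RetirementPeriod`, and `retire` succeeds only at or after that block. The bookkeeping in miniature, with plain integers standing in for accounts and `T::BlockNumber`:

use std::collections::HashMap;

const RETIREMENT_PERIOD: u64 = 4; // the mock uses MotionDuration + 1

struct Alliance {
    now: u64,
    retiring: HashMap<u64, u64>, // member -> earliest retirement block
}

impl Alliance {
    fn give_retirement_notice(&mut self, who: u64) -> Result<(), &'static str> {
        if self.retiring.contains_key(&who) {
            return Err("AlreadyRetiring");
        }
        // `saturating_add` in the pallet; a plain add suffices for the sketch.
        self.retiring.insert(who, self.now + RETIREMENT_PERIOD);
        Ok(())
    }

    fn retire(&mut self, who: u64) -> Result<(), &'static str> {
        let end = *self.retiring.get(&who).ok_or("RetirementNoticeNotGiven")?;
        if self.now < end {
            return Err("RetirementPeriodNotPassed");
        }
        self.retiring.remove(&who);
        Ok(())
    }
}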
ensure!(!Self::is_unscrupulous_account(&who), Error::::AccountNonGrata); ensure!(!Self::is_member(&who), Error::::AlreadyMember); @@ -729,10 +759,7 @@ pub mod pallet { /// A founder or fellow can nominate someone to join the alliance as an Ally. /// There is no deposit required to the nominator or nominee. #[pallet::weight(T::WeightInfo::nominate_ally())] - pub fn nominate_ally( - origin: OriginFor, - who: ::Source, - ) -> DispatchResult { + pub fn nominate_ally(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let nominator = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&nominator), Error::::NoVotingRights); let who = T::Lookup::lookup(who)?; @@ -756,10 +783,7 @@ pub mod pallet { /// Elevate an ally to fellow. #[pallet::weight(T::WeightInfo::elevate_ally())] - pub fn elevate_ally( - origin: OriginFor, - ally: ::Source, - ) -> DispatchResult { + pub fn elevate_ally(origin: OriginFor, ally: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let ally = T::Lookup::lookup(ally)?; ensure!(Self::is_ally(&ally), Error::::NotAlly); @@ -772,15 +796,40 @@ pub mod pallet { Ok(()) } + /// As a member, give a retirement notice and start a retirement period required to pass in + /// order to retire. + #[pallet::weight(T::WeightInfo::give_retirement_notice())] + pub fn give_retirement_notice(origin: OriginFor) -> DispatchResult { + let who = ensure_signed(origin)?; + let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; + ensure!(role.ne(&MemberRole::Retiring), Error::::AlreadyRetiring); + + Self::remove_member(&who, role)?; + Self::add_member(&who, MemberRole::Retiring)?; + >::insert( + &who, + frame_system::Pallet::::block_number() + .saturating_add(T::RetirementPeriod::get()), + ); + + Self::deposit_event(Event::MemberRetirementPeriodStarted { member: who }); + Ok(()) + } + /// As a member, retire from the alliance and unreserve the deposit. + /// This can only be done once you have `give_retirement_notice` and it has expired. #[pallet::weight(T::WeightInfo::retire())] pub fn retire(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - // A member up for kicking cannot retire. - ensure!(!Self::is_up_for_kicking(&who), Error::::UpForKicking); + let retirement_period_end = RetiringMembers::::get(&who) + .ok_or(Error::::RetirementNoticeNotGiven)?; + ensure!( + frame_system::Pallet::::block_number() >= retirement_period_end, + Error::::RetirementPeriodNotPassed + ); - let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; - Self::remove_member(&who, role)?; + Self::remove_member(&who, MemberRole::Retiring)?; + >::remove(&who); let deposit = DepositOf::::take(&who); if let Some(deposit) = deposit { let err_amount = T::Currency::unreserve(&who, deposit); @@ -792,10 +841,7 @@ pub mod pallet { /// Kick a member from the alliance and slash its deposit. #[pallet::weight(T::WeightInfo::kick_member())] - pub fn kick_member( - origin: OriginFor, - who: ::Source, - ) -> DispatchResult { + pub fn kick_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let member = T::Lookup::lookup(who)?; @@ -806,8 +852,6 @@ pub mod pallet { T::Slashed::on_unbalanced(T::Currency::slash_reserved(&member, deposit).0); } - >::remove(&member); - Self::deposit_event(Event::MemberKicked { member, slashed: deposit }); Ok(()) } @@ -867,6 +911,11 @@ pub mod pallet { } impl, I: 'static> Pallet { + /// Check if the Alliance has been initialized. 
+ fn is_initialized() -> bool { + Self::has_member(MemberRole::Founder) || Self::has_member(MemberRole::Fellow) + } + /// Check if a given role has any members. fn has_member(role: MemberRole) -> bool { Members::::decode_len(role).unwrap_or_default() > 0 @@ -917,11 +966,6 @@ impl, I: 'static> Pallet { founders.into() } - /// Check if an account's forced removal is up for consideration. - fn is_up_for_kicking(who: &T::AccountId) -> bool { - >::contains_key(&who) - } - /// Add a user to the sorted alliance member set. fn add_member(who: &T::AccountId, role: MemberRole) -> DispatchResult { >::try_mutate(role, |members| -> DispatchResult { diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs new file mode 100644 index 0000000000000..010603902f0fd --- /dev/null +++ b/frame/alliance/src/migration.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Config, Pallet, Weight, LOG_TARGET}; +use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; +use log; + +/// The current storage version. +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +/// Wrapper for all migrations of this pallet. +pub fn migrate, I: 'static>() -> Weight { + let onchain_version = Pallet::::on_chain_storage_version(); + let mut weight: Weight = Weight::new(); + + if onchain_version < 1 { + weight = weight.saturating_add(v0_to_v1::migrate::()); + } + + STORAGE_VERSION.put::>(); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + weight +} + +/// Implements `OnRuntimeUpgrade` trait. +pub struct Migration(PhantomData<(T, I)>); + +impl, I: 'static> OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + migrate::() + } +} + +/// v0_to_v1: `UpForKicking` is replaced by a retirement period. +mod v0_to_v1 { + use super::*; + + pub fn migrate, I: 'static>() -> Weight { + if migration::clear_storage_prefix( + >::name().as_bytes(), + b"UpForKicking", + b"", + None, + None, + ) + .maybe_cursor + .is_some() + { + log::error!( + target: LOG_TARGET, + "Storage prefix 'UpForKicking' is not completely cleared." + ); + } + + T::DbWeight::get().writes(1) + } +} diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index d6e9a92a10dec..adc313e28ed7e 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -26,7 +26,7 @@ pub use sp_runtime::{ use sp_std::convert::{TryFrom, TryInto}; pub use frame_support::{ - assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{EitherOfDiverse, GenesisBuild, SortedMembers}, BoundedVec, }; @@ -37,8 +37,10 @@ pub use crate as pallet_alliance; use super::*; +type BlockNumber = u64; + parameter_types! 
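The new migration module only runs if a runtime schedules it; a hedged sketch of the usual wiring in a downstream runtime (all names besides the pallet path are the conventional `Executive` parameters, shown here as assumptions rather than code from this PR):

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    // Clears `UpForKicking` and bumps the pallet storage version to 1
    // on the next runtime upgrade.
    pallet_alliance::migration::Migration<Runtime, ()>,
>;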
{
-	pub const BlockHashCount: u64 = 250;
+	pub const BlockHashCount: BlockNumber = 250;
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = frame_support::traits::Everything;
@@ -47,7 +49,7 @@ impl frame_system::Config for Test {
 	type Origin = Origin;
 	type Call = Call;
 	type Index = u64;
-	type BlockNumber = u64;
+	type BlockNumber = BlockNumber;
 	type Hash = H256;
 	type Hashing = BlakeTwo256;
 	type AccountId = u64;
@@ -83,8 +85,10 @@ impl pallet_balances::Config for Test {
 	type ReserveIdentifier = [u8; 8];
 }

+const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3;
+
 parameter_types! {
-	pub const MotionDuration: u64 = 3;
+	pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS;
 	pub const MaxProposals: u32 = 100;
 	pub const MaxMembers: u32 = 100;
 }
@@ -199,6 +203,7 @@ parameter_types! {
 	pub const MaxFellows: u32 = MaxMembers::get() - MaxFounders::get();
 	pub const MaxAllies: u32 = 100;
 	pub const AllyDeposit: u64 = 25;
+	pub const RetirementPeriod: BlockNumber = MOTION_DURATION_IN_BLOCKS + 1;
 }
 impl Config for Test {
 	type Event = Event;
@@ -225,6 +230,7 @@ impl Config for Test {
 	type MaxMembersCount = MaxMembers;
 	type AllyDeposit = AllyDeposit;
 	type WeightInfo = ();
+	type RetirementPeriod = RetirementPeriod;
 }

 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
@@ -291,6 +297,12 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 5, Judgement::KnownGood));
 	assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone())));

+	// Joining before init should fail.
+	assert_noop!(
+		Alliance::join_alliance(Origin::signed(1)),
+		Error::<Test>::AllianceNotYetInitialized
+	);
+
 	assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![]));

 	System::set_block_number(1);
@@ -318,7 +330,3 @@ pub fn make_proposal(value: u64) -> Call {
 pub fn make_set_rule_proposal(rule: Cid) -> Call {
 	Call::Alliance(pallet_alliance::Call::set_rule { rule })
 }
-
-pub fn make_kick_member_proposal(who: u64) -> Call {
-	Call::Alliance(pallet_alliance::Call::kick_member { who })
-}
diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs
index 85c91b451d351..918cfa840c3f0 100644
--- a/frame/alliance/src/tests.rs
+++ b/frame/alliance/src/tests.rs
@@ -19,7 +19,7 @@

 use sp_runtime::traits::Hash;

-use frame_support::{assert_noop, assert_ok, Hashable};
+use frame_support::{assert_noop, assert_ok, error::BadOrigin, Hashable};
 use frame_system::{EventRecord, Phase};

 use super::*;
@@ -76,7 +76,7 @@ fn vote_works() {
 			Box::new(proposal.clone()),
 			proposal_len
 		));
-		assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true));
+		assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true));

 		let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] };
 		assert_eq!(
@@ -85,12 +85,12 @@ fn vote_works() {
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed {
 				account: 1,
 				proposal_index: 0,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				threshold: 3
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted {
 				account: 2,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				voted: true,
 				yes: 1,
 				no: 0,
@@ -114,7 +114,7 @@ fn veto_works() {
 		));
 		// only set_rule/elevate_ally proposals can be vetoed
 		assert_noop!(
-			Alliance::veto(Origin::signed(1), hash.clone()),
+			Alliance::veto(Origin::signed(1), hash),
 			Error::<Test>::NotVetoableProposal
 		);
@@ -131,11 +131,11 @@
 		// only founders have veto rights; 3 is a fellow
 		assert_noop!(
-			Alliance::veto(Origin::signed(3), vetoable_hash.clone()),
+			Alliance::veto(Origin::signed(3), vetoable_hash),
 			Error::<Test>::NotFounder
 		);

-		assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash.clone()));
+		assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash));
 		let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] };
 		assert_eq!(
 			System::events(),
@@ -143,17 +143,17 @@
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed {
 				account: 1,
 				proposal_index: 0,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				threshold: 3
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed {
 				account: 1,
 				proposal_index: 1,
-				proposal_hash: vetoable_hash.clone(),
+				proposal_hash: vetoable_hash,
 				threshold: 3
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Disapproved {
-				proposal_hash: vetoable_hash.clone()
+				proposal_hash: vetoable_hash
 			})),
 		]
 		);
@@ -173,16 +173,10 @@ fn close_works() {
 			Box::new(proposal.clone()),
 			proposal_len
 		));
-		assert_ok!(Alliance::vote(Origin::signed(1), hash.clone(), 0, true));
-		assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true));
-		assert_ok!(Alliance::vote(Origin::signed(3), hash.clone(), 0, true));
-		assert_ok!(Alliance::close(
-			Origin::signed(1),
-			hash.clone(),
-			0,
-			proposal_weight,
-			proposal_len
-		));
+		assert_ok!(Alliance::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true));
+		assert_ok!(Alliance::vote(Origin::signed(3), hash, 0, true));
+		assert_ok!(Alliance::close(Origin::signed(1), hash, 0, proposal_weight, proposal_len));

 		let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] };
 		assert_eq!(
@@ -191,40 +185,40 @@
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed {
 				account: 1,
 				proposal_index: 0,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				threshold: 3
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted {
 				account: 1,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				voted: true,
 				yes: 1,
 				no: 0,
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted {
 				account: 2,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				voted: true,
 				yes: 2,
 				no: 0,
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted {
 				account: 3,
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				voted: true,
 				yes: 3,
 				no: 0,
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Closed {
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				yes: 3,
 				no: 0,
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Approved {
-				proposal_hash: hash.clone()
+				proposal_hash: hash
 			})),
 			record(mock::Event::AllianceMotion(AllianceMotionEvent::Executed {
-				proposal_hash: hash.clone(),
+				proposal_hash: hash,
 				result: Err(DispatchError::BadOrigin),
 			}))
 		]
 		);
@@ -247,6 +241,9 @@ fn set_rule_works() {
 fn announce_works() {
 	new_test_ext().execute_with(|| {
 		let cid = test_cid();
+
+		assert_noop!(Alliance::announce(Origin::signed(2), cid.clone()), BadOrigin);
+
 		assert_ok!(Alliance::announce(Origin::signed(3), cid.clone()));
 		assert_eq!(Alliance::announcements(), vec![cid.clone()]);
@@ -396,48 +393,111 @@ fn elevate_ally_works() {
 }

 #[test]
-fn retire_works() {
+fn give_retirement_notice_work() {
 	new_test_ext().execute_with(|| {
-		let proposal = make_kick_member_proposal(2);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		assert_ok!(Alliance::propose(
-			Origin::signed(1),
-			3,
-			Box::new(proposal.clone()),
-			proposal_len
+		assert_noop!(
+			Alliance::give_retirement_notice(Origin::signed(4)),
+			Error::<Test>::NotMember
+		);
+
+		assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]);
+		assert_ok!(Alliance::give_retirement_notice(Origin::signed(3)));
+		assert_eq!(Alliance::members(MemberRole::Fellow), Vec::<u64>::new());
+		assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]);
+		System::assert_last_event(mock::Event::Alliance(
+			crate::Event::MemberRetirementPeriodStarted { member: (3) },
 		));
-		assert_noop!(Alliance::retire(Origin::signed(2)), Error::<Test>::UpForKicking);
-		assert_noop!(Alliance::retire(Origin::signed(4)), Error::<Test>::NotMember);
+		assert_noop!(
+			Alliance::give_retirement_notice(Origin::signed(3)),
+			Error::<Test>::AlreadyRetiring
+		);
+	});
+}
+
+#[test]
+fn retire_works() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(
+			Alliance::retire(Origin::signed(2)),
+			Error::<Test>::RetirementNoticeNotGiven
+		);
+
+		assert_noop!(
+			Alliance::retire(Origin::signed(4)),
+			Error::<Test>::RetirementNoticeNotGiven
+		);

 		assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]);
+		assert_ok!(Alliance::give_retirement_notice(Origin::signed(3)));
+		assert_noop!(
+			Alliance::retire(Origin::signed(3)),
+			Error::<Test>::RetirementPeriodNotPassed
+		);
+		System::set_block_number(System::block_number() + RetirementPeriod::get());
 		assert_ok!(Alliance::retire(Origin::signed(3)));
 		assert_eq!(Alliance::members(MemberRole::Fellow), Vec::<u64>::new());
+		System::assert_last_event(mock::Event::Alliance(crate::Event::MemberRetired {
+			member: (3),
+			unreserved: None,
+		}));
+
+		// Move time on:
+		System::set_block_number(System::block_number() + RetirementPeriod::get());
+
+		assert_powerless(Origin::signed(3));
 	});
 }

+fn assert_powerless(user: Origin) {
+	// vote / veto with a valid proposal
+	let cid = test_cid();
+	let proposal = make_proposal(42);
+
+	assert_noop!(Alliance::init_members(user.clone(), vec![], vec![], vec![]), BadOrigin);
+
+	assert_noop!(Alliance::set_rule(user.clone(), cid.clone()), BadOrigin);
+
+	assert_noop!(Alliance::retire(user.clone()), Error::<Test>::RetirementNoticeNotGiven);
+
+	assert_noop!(Alliance::give_retirement_notice(user.clone()), Error::<Test>::NotMember);
+
+	assert_noop!(Alliance::elevate_ally(user.clone(), 4), BadOrigin);
+
+	assert_noop!(Alliance::kick_member(user.clone(), 1), BadOrigin);
+
+	assert_noop!(Alliance::nominate_ally(user.clone(), 4), Error::<Test>::NoVotingRights);
+
+	assert_noop!(
+		Alliance::propose(user.clone(), 5, Box::new(proposal), 1000),
+		Error::<Test>::NoVotingRights
+	);
+}
+
 #[test]
 fn kick_member_works() {
 	new_test_ext().execute_with(|| {
-		let proposal = make_kick_member_proposal(2);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		assert_ok!(Alliance::propose(
-			Origin::signed(1),
-			3,
-			Box::new(proposal.clone()),
-			proposal_len
-		));
-		assert_eq!(Alliance::up_for_kicking(2), true);
-		assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]);
+		assert_noop!(Alliance::kick_member(Origin::signed(4), 4), BadOrigin);
+
+		assert_noop!(Alliance::kick_member(Origin::signed(2), 4), Error::<Test>::NotMember);
+
+		<DepositOf<Test, ()>>::insert(2, 25);
+		assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]);
 		assert_ok!(Alliance::kick_member(Origin::signed(2), 2));
 		assert_eq!(Alliance::members(MemberRole::Founder), vec![1]);
+		assert_eq!(<DepositOf<Test, ()>>::get(2), None);
+		System::assert_last_event(mock::Event::Alliance(crate::Event::MemberKicked {
+			member: (2),
+			slashed: Some(25),
+		}));
 	});
 }

 #[test]
 fn add_unscrupulous_items_works() {
 	new_test_ext().execute_with(|| {
+
assert_noop!(Alliance::add_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_ok!(Alliance::add_unscrupulous_items( Origin::signed(3), vec![ @@ -461,6 +521,8 @@ fn add_unscrupulous_items_works() { #[test] fn remove_unscrupulous_items_works() { new_test_ext().execute_with(|| { + assert_noop!(Alliance::remove_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_noop!( Alliance::remove_unscrupulous_items( Origin::signed(3), diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 495dd1b83df93..048bea3d1e5a4 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,29 +18,30 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-10-11, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 +//! DATE: 2022-08-26, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/release/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark -// --chain=dev +// pallet // --steps=50 // --repeat=20 -// --pallet=pallet_alliance // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_alliance +// --chain=dev // --output=./frame/alliance/src/weights.rs // --template=./.maintain/frame-weight-template.hbs - #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_alliance. @@ -59,6 +60,7 @@ pub trait WeightInfo { fn join_alliance() -> Weight; fn nominate_ally() -> Weight; fn elevate_ally() -> Weight; + fn give_retirement_notice() -> Weight; fn retire() -> Weight; fn kick_member() -> Weight; fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight; @@ -73,173 +75,199 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (39_992_000 as Weight) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `p` is `[1, 100]`. 
+ fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(22_575_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) + /// The range of component `x` is `[3, 10]`. + /// The range of component `y` is `[2, 90]`. fn vote(x: u32, y: u32, ) -> Weight { - (36_649_000 as Weight) - // Standard Error: 90_000 - .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(45_486_000 as RefTimeWeight) + // Standard Error: 29_000 + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - (30_301_000 as Weight) - // Standard Error: 1_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(35_296_000 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (40_472_000 as Weight) - // Standard Error: 69_000 - .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(39_252_000 as RefTimeWeight) + // Standard Error: 18_000 + .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (52_076_000 as Weight) - // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 77_000 - .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(50_357_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (47_009_000 as Weight) - // Standard Error: 66_000 - .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. 
+ /// The range of component `p` is `[1, 100]`. + fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(41_258_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (43_650_000 as Weight) - // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 85_000 - .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 3_000 - .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - // Storage: Alliance Members (r:3 w:3) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(40_490_000 as RefTimeWeight) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + } + // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `z` is `[0, 100]`. 
fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (45_100_000 as Weight) - // Standard Error: 4_000 - .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 4_000 - .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(35_186_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (14_517_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_189_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (16_801_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_106_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (17_133_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_208_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) - // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (95_370_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(53_771_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } - // Storage: Alliance Members (r:4 w:0) + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (44_764_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_912_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (44_013_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(36_811_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } - // Storage: Alliance KickingMembers (r:1 w:0) - // Storage: 
Alliance Members (r:3 w:1) + // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) + // Storage: Alliance RetiringMembers (r:0 w:1) + fn give_retirement_notice() -> Weight { + Weight::from_ref_time(41_079_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + } + // Storage: Alliance RetiringMembers (r:1 w:1) + // Storage: Alliance Members (r:1 w:1) + // Storage: Alliance DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) fn retire() -> Weight { - (60_183_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(42_703_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } - // Storage: Alliance KickingMembers (r:1 w:0) // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -247,31 +275,35 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (67_467_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_370_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 16_000 - .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_000 - .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. 
fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 343_000 - .saturating_add((59_025_000 as Weight).saturating_mul(n as Weight)) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 153_000 - .saturating_add((6_725_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 59_000 + .saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -282,173 +314,199 @@ impl WeightInfo for () { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (39_992_000 as Weight) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(22_575_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) + /// The range of component `x` is `[3, 10]`. + /// The range of component `y` is `[2, 90]`. 
fn vote(x: u32, y: u32, ) -> Weight { - (36_649_000 as Weight) - // Standard Error: 90_000 - .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(45_486_000 as RefTimeWeight) + // Standard Error: 29_000 + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - (30_301_000 as Weight) - // Standard Error: 1_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(35_296_000 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (40_472_000 as Weight) - // Standard Error: 69_000 - .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(39_252_000 as RefTimeWeight) + // Standard Error: 18_000 + .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (52_076_000 as Weight) - // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 77_000 - .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(50_357_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (47_009_000 as Weight) - // Standard Error: 66_000 - .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. 
+ /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(41_258_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (43_650_000 as Weight) - // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 85_000 - .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 3_000 - .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - // Storage: Alliance Members (r:3 w:3) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(40_490_000 as RefTimeWeight) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + } + // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `z` is `[0, 100]`. 
fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (45_100_000 as Weight) - // Standard Error: 4_000 - .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 4_000 - .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(35_186_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (14_517_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_189_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (16_801_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_106_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (17_133_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_208_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) - // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (95_370_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(53_771_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } - // Storage: Alliance Members (r:4 w:0) + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (44_764_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_912_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (44_013_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(36_811_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } - // Storage: 
Alliance KickingMembers (r:1 w:0) - // Storage: Alliance Members (r:3 w:1) + // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) + // Storage: Alliance RetiringMembers (r:0 w:1) + fn give_retirement_notice() -> Weight { + Weight::from_ref_time(41_079_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + } + // Storage: Alliance RetiringMembers (r:1 w:1) + // Storage: Alliance Members (r:1 w:1) + // Storage: Alliance DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) fn retire() -> Weight { - (60_183_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(42_703_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } - // Storage: Alliance KickingMembers (r:1 w:0) // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -456,30 +514,34 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (67_467_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_370_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 16_000 - .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_000 - .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. 
 	fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight {
-		(0 as Weight)
-		// Standard Error: 343_000
-		.saturating_add((59_025_000 as Weight).saturating_mul(n as Weight))
+		Weight::from_ref_time(0 as RefTimeWeight)
 		// Standard Error: 153_000
-		.saturating_add((6_725_000 as Weight).saturating_mul(l as Weight))
-		.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		.saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		// Standard Error: 59_000
+		.saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 }
diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs
index ca88899edf842..043d3f1a1aef4 100644
--- a/frame/assets/src/benchmarking.rs
+++ b/frame/assets/src/benchmarking.rs
@@ -37,7 +37,7 @@ const SEED: u32 = 0;

 fn create_default_asset<T: Config<I>, I: 'static>(
 	is_sufficient: bool,
-) -> (T::AccountId, <T::Lookup as StaticLookup>::Source) {
+) -> (T::AccountId, AccountIdLookupOf<T>) {
 	let caller: T::AccountId = whitelisted_caller();
 	let caller_lookup = T::Lookup::unlookup(caller.clone());
 	let root = SystemOrigin::Root.into();
@@ -55,7 +55,7 @@ fn create_default_asset<T: Config<I>, I: 'static>(
 fn create_default_minted_asset<T: Config<I>, I: 'static>(
 	is_sufficient: bool,
 	amount: T::Balance,
-) -> (T::AccountId, <T::Lookup as StaticLookup>::Source) {
+) -> (T::AccountId, AccountIdLookupOf<T>) {
 	let (caller, caller_lookup) = create_default_asset::<T, I>(is_sufficient);
 	if !is_sufficient {
 		T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance());
diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs
index 3f9146c5d229b..d7ca83c8a84e3 100644
--- a/frame/assets/src/lib.rs
+++ b/frame/assets/src/lib.rs
@@ -79,8 +79,6 @@
 //! * `create`: Creates a new asset class, taking the required deposit.
 //! * `transfer`: Transfer sender's assets to another account.
 //! * `transfer_keep_alive`: Transfer sender's assets to another account, keeping the sender alive.
-//! * `set_metadata`: Set the metadata of an asset class.
-//! * `clear_metadata`: Remove the metadata of an asset class.
 //! * `approve_transfer`: Create or increase a delegated transfer.
 //! * `cancel_approval`: Rescind a previous approval.
 //! * `transfer_approved`: Transfer third-party's assets to another account.
@@ -103,6 +101,8 @@
 //! * `transfer_ownership`: Changes an asset class's Owner; called by the asset class's Owner.
 //! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's
 //!   Owner.
+//! * `set_metadata`: Set the metadata of an asset class; called by the asset class's Owner.
+//! * `clear_metadata`: Remove the metadata of an asset class; called by the asset class's Owner.
 //!
 //! Please refer to the [`Call`] enum and its associated variants for documentation on each
 //! function.
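The lib.rs hunks that follow collapse every verbose lookup-source parameter in pallet_assets into one alias while keeping the per-call `T::Lookup::lookup` resolution unchanged. A minimal sketch of the pattern, assuming the frame_system and sp_runtime imports the file already has:

    use sp_runtime::traits::StaticLookup;

    // One name for the associated-type path that every dispatchable takes:
    type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

    // Call bodies stay as before: accept the alias, resolve it exactly once.
    //   pub fn freeze(origin: OriginFor<T>, id: T::AssetId, who: AccountIdLookupOf<T>) -> DispatchResult {
    //       let who = T::Lookup::lookup(who)?;
    //       ...
    //   }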
@@ -164,6 +164,8 @@ use frame_system::Config as SystemConfig;
 pub use pallet::*;
 pub use weights::WeightInfo;

+type AccountIdLookupOf<T> = <<T as SystemConfig>::Lookup as StaticLookup>::Source;
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -501,7 +503,7 @@ pub mod pallet {
 		pub fn create(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			admin: <T::Lookup as StaticLookup>::Source,
+			admin: AccountIdLookupOf<T>,
 			min_balance: T::Balance,
 		) -> DispatchResult {
 			let owner = ensure_signed(origin)?;
@@ -557,7 +559,7 @@ pub mod pallet {
 		pub fn force_create(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			owner: <T::Lookup as StaticLookup>::Source,
+			owner: AccountIdLookupOf<T>,
 			is_sufficient: bool,
 			#[pallet::compact] min_balance: T::Balance,
 		) -> DispatchResult {
@@ -623,7 +625,7 @@ pub mod pallet {
 		pub fn mint(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			beneficiary: <T::Lookup as StaticLookup>::Source,
+			beneficiary: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -651,7 +653,7 @@ pub mod pallet {
 		pub fn burn(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			who: <T::Lookup as StaticLookup>::Source,
+			who: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -684,7 +686,7 @@ pub mod pallet {
 		pub fn transfer(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			target: <T::Lookup as StaticLookup>::Source,
+			target: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -716,7 +718,7 @@ pub mod pallet {
 		pub fn transfer_keep_alive(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			target: <T::Lookup as StaticLookup>::Source,
+			target: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let source = ensure_signed(origin)?;
@@ -749,8 +751,8 @@ pub mod pallet {
 		pub fn force_transfer(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			source: <T::Lookup as StaticLookup>::Source,
-			dest: <T::Lookup as StaticLookup>::Source,
+			source: AccountIdLookupOf<T>,
+			dest: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -775,7 +777,7 @@ pub mod pallet {
 		pub fn freeze(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			who: <T::Lookup as StaticLookup>::Source,
+			who: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -806,7 +808,7 @@ pub mod pallet {
 		pub fn thaw(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			who: <T::Lookup as StaticLookup>::Source,
+			who: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
@@ -891,7 +893,7 @@ pub mod pallet {
 		pub fn transfer_ownership(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			owner: <T::Lookup as StaticLookup>::Source,
+			owner: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 			let owner = T::Lookup::lookup(owner)?;
@@ -932,9 +934,9 @@ pub mod pallet {
 		pub fn set_team(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			issuer: <T::Lookup as StaticLookup>::Source,
-			admin: <T::Lookup as StaticLookup>::Source,
-			freezer: <T::Lookup as StaticLookup>::Source,
+			issuer: AccountIdLookupOf<T>,
+			admin: AccountIdLookupOf<T>,
+			freezer: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 			let issuer = T::Lookup::lookup(issuer)?;
@@ -1117,10 +1119,10 @@ pub mod pallet {
 		pub fn force_asset_status(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			owner: <T::Lookup as StaticLookup>::Source,
-			issuer: <T::Lookup as StaticLookup>::Source,
-			admin: <T::Lookup as StaticLookup>::Source,
-			freezer: <T::Lookup as StaticLookup>::Source,
+			owner: AccountIdLookupOf<T>,
+			issuer: AccountIdLookupOf<T>,
+			admin: AccountIdLookupOf<T>,
+			freezer: AccountIdLookupOf<T>,
 			#[pallet::compact] min_balance: T::Balance,
 			is_sufficient: bool,
 			is_frozen: bool,
@@ -1167,7 +1169,7 @@
 		pub fn approve_transfer(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			delegate: <T::Lookup as StaticLookup>::Source,
+			delegate: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let owner = ensure_signed(origin)?;
@@ -1192,7 +1194,7 @@
 		pub fn cancel_approval(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			delegate: <T::Lookup as StaticLookup>::Source,
+			delegate: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let owner = ensure_signed(origin)?;
 			let delegate = T::Lookup::lookup(delegate)?;
@@ -1225,8 +1227,8 @@
 		pub fn force_cancel_approval(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			owner: <T::Lookup as StaticLookup>::Source,
-			delegate: <T::Lookup as StaticLookup>::Source,
+			owner: AccountIdLookupOf<T>,
+			delegate: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let mut d = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
 			T::ForceOrigin::try_origin(origin)
@@ -1272,8 +1274,8 @@
 		pub fn transfer_approved(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			owner: <T::Lookup as StaticLookup>::Source,
-			destination: <T::Lookup as StaticLookup>::Source,
+			owner: AccountIdLookupOf<T>,
+			destination: AccountIdLookupOf<T>,
 			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let delegate = ensure_signed(origin)?;
diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs
index 67690e2b28ec1..0fd4dd3281516 100644
--- a/frame/assets/src/mock.rs
+++ b/frame/assets/src/mock.rs
@@ -115,11 +115,11 @@ thread_local! {
 pub struct TestFreezer;
 impl FrozenBalance<u32, u64, u64> for TestFreezer {
 	fn frozen_balance(asset: u32, who: &u64) -> Option<u64> {
-		FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned())
+		FROZEN.with(|f| f.borrow().get(&(asset, *who)).cloned())
 	}

 	fn died(asset: u32, who: &u64) {
-		HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone())));
+		HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, *who)));
 		// Sanity check: dead accounts have no balance.
 		assert!(Assets::balance(asset, *who).is_zero());
 	}
diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs
index e8f1184cf570f..971728df46c24 100644
--- a/frame/assets/src/weights.rs
+++ b/frame/assets/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;

 /// Weight functions needed for pallet_assets.
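Every weight function in the hunks below follows the same mechanical rewrite from bare integer weights to the typed constructors; schematically, using the values from the `create()` and `set_metadata()` hunks that follow:

    // before: weights were bare integers cast to `Weight`
    //     (27_167_000 as Weight)
    //         .saturating_add(T::DbWeight::get().reads(1 as Weight))
    // after: the base term goes through the constructor, and read/write counts
    // become `RefTimeWeight`
    //     Weight::from_ref_time(27_167_000 as RefTimeWeight)
    //         .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
    // per-component terms likewise move from `.saturating_mul(n as Weight)` to
    //     .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))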
@@ -74,15 +74,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - (27_167_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(27_167_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - (15_473_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(15_473_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -90,168 +90,168 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - (30_819_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_819_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) 
+ .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - (35_212_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(35_212_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (47_401_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_401_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (42_300_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_300_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (47_946_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_946_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - (21_670_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_670_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - (21_503_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_503_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - (18_158_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_158_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - (18_525_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_525_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets 
Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - (19_858_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_858_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - (18_045_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_045_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - (32_395_000 as Weight) + Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - (32_893_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_893_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (19_586_000 as Weight) + Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - (32_478_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_478_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - (17_143_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_143_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets 
Approvals (r:1 w:1) fn approve_transfer() -> Weight { - (36_389_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_389_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - (61_854_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_854_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - (36_759_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_759_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - (37_753_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_753_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -259,15 +259,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - (27_167_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(27_167_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - (15_473_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(15_473_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -275,167 +275,167 @@ impl WeightInfo for () { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - 
.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - (30_819_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_819_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - (35_212_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(35_212_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (47_401_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_401_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (42_300_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_300_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (47_946_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + 
Weight::from_ref_time(47_946_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - (21_670_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_670_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - (21_503_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_503_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - (18_158_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_158_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - (18_525_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_525_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - (19_858_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_858_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - (18_045_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_045_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - (32_395_000 as Weight) + Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: 
Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - (32_893_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_893_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (19_586_000 as Weight) + Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - (32_478_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_478_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - (17_143_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_143_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - (36_389_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_389_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - (61_854_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_854_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - (36_759_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_759_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - (37_753_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + 
Weight::from_ref_time(37_753_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 1ddf3888d3c96..1b6c62c55ee20 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -243,7 +243,7 @@ pub mod pallet { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn create_swap( origin: OriginFor, target: T::AccountId, @@ -280,9 +280,10 @@ pub mod pallet { /// the operation fails. This is used for weight calculation. #[pallet::weight( T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((proof.len() as Weight).saturating_mul(100)) .saturating_add(action.weight()) + .ref_time() + .saturating_add(40_000_000) + .saturating_add((proof.len() as u64).saturating_mul(100)) )] pub fn claim_swap( origin: OriginFor, @@ -317,7 +318,7 @@ pub mod pallet { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn cancel_swap( origin: OriginFor, target: T::AccountId, @@ -333,7 +334,7 @@ pub mod pallet { ); swap.action.cancel(&swap.source); - PendingSwaps::::remove(&target, hashed_proof.clone()); + PendingSwaps::::remove(&target, hashed_proof); Self::deposit_event(Event::SwapCancelled { account: target, proof: hashed_proof }); diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 2352e7852d090..ffb548a1f29ed 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -30,7 +30,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -102,7 +102,7 @@ fn two_party_successful_swap() { AtomicSwap::create_swap( Origin::signed(A), B, - hashed_proof.clone(), + hashed_proof, BalanceSwapAction::new(50), 1000, ) @@ -117,7 +117,7 @@ fn two_party_successful_swap() { AtomicSwap::create_swap( Origin::signed(B), A, - hashed_proof.clone(), + hashed_proof, BalanceSwapAction::new(75), 1000, ) diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 636a28692ba28..5023feeaf8aea 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -49,7 +49,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index a56d8e785f6ac..d5f9783f153c8 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -227,7 +227,7 @@ mod tests { pub const Period: BlockNumber = 1; pub const Offset: BlockNumber = 0; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 8ddccfd9cf939..ff18e047db048 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -190,7 +190,7 @@ pub mod pallet { T::EventHandler::note_author(author); } - 0 + Weight::zero() } fn on_finalize(_: T::BlockNumber) { @@ -460,7 +460,7 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index 57c74323b7932..cc1d11108b2e1 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -38,8 +38,8 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - (35 * WEIGHT_PER_MICROS) - .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + let ref_time_weight = (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof .saturating_add(110 * WEIGHT_PER_MICROS) @@ -47,6 +47,8 @@ impl crate::WeightInfo for () { .saturating_add(110 * WEIGHT_PER_MICROS) .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)); + + ref_time_weight } } diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index df46f3544b389..f55bda751887d 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -284,9 +284,9 @@ impl Offence self.slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 1effc2c1989fa..48ca62a5b1a09 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -336,7 +336,7 @@ pub mod pallet { /// Initialization fn on_initialize(now: BlockNumberFor) -> Weight { Self::initialize(now); - 0 + Weight::zero() } /// Block finalization @@ -1008,6 +1008,6 @@ pub mod migrations { writes += 3; - T::DbWeight::get().writes(writes) + T::DbWeight::get().reads(reads) + 
T::DbWeight::get().reads_writes(reads, writes) } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 5677eb7e28e49..3a6348404b9a7 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -66,7 +66,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { @@ -429,7 +429,7 @@ pub fn generate_equivocation_proof( System::reset_events(); System::initialize(&current_block, &parent_hash, &pre_digest); System::set_block_number(current_block); - Timestamp::set_timestamp(current_block); + Timestamp::set_timestamp(*current_slot * Babe::slot_duration()); System::finalize() }; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 0859bb7a40849..2f967b658e396 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -659,7 +659,7 @@ fn report_equivocation_invalid_equivocation_proof() { equivocation_proof.second_header = equivocation_proof.first_header.clone(); assert_invalid_equivocation(equivocation_proof); - // missing preruntime digest from one header + // missing pre-runtime digest from one header let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, @@ -852,7 +852,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight > 0); + assert!(info.weight > Weight::zero()); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index dba0c9ee1e623..1f66697cb6765 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -25,7 +25,7 @@ use frame_support::{assert_ok, traits::Get}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::One; -frame_benchmarking::benchmarks! { +frame_benchmarking::benchmarks_instance_pallet! { rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // @@ -57,6 +57,8 @@ frame_benchmarking::benchmarks! { let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + let origin_middle_lookup = T::Lookup::unlookup(origin_middle.clone()); + // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -69,7 +71,7 @@ frame_benchmarking::benchmarks! { let caller = whitelisted_caller(); // update the weight of `origin_middle` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_middle, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_middle.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_middle_lookup.clone()) verify { // check the bags have updated as expected. assert_eq!( @@ -97,7 +99,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); // define our origin and destination thresholds. let origin_bag_thresh = T::BagThresholds::get()[0]; @@ -114,6 +116,8 @@ frame_benchmarking::benchmarks! 
{ let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + let origin_tail_lookup = T::Lookup::unlookup(origin_tail.clone()); + // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -126,7 +130,7 @@ frame_benchmarking::benchmarks! { let caller = whitelisted_caller(); // update the weight of `origin_tail` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_tail, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_tail.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_tail_lookup.clone()) verify { // check the bags have updated as expected. assert_eq!( @@ -146,7 +150,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); let bag_thresh = T::BagThresholds::get()[0]; @@ -166,13 +170,15 @@ frame_benchmarking::benchmarks! { T::ScoreProvider::set_score_of(&lighter, bag_thresh - One::one()); T::ScoreProvider::set_score_of(&heavier, bag_thresh); + let lighter_lookup = T::Lookup::unlookup(lighter.clone()); + assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), vec![lighter.clone(), heavier_prev.clone(), heavier.clone(), heavier_next.clone()] ); whitelist_account!(heavier); - }: _(SystemOrigin::Signed(heavier.clone()), lighter.clone()) + }: _(SystemOrigin::Signed(heavier.clone()), lighter_lookup.clone()) verify { assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 7eee8fdfa23d8..5163a579c6f43 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -56,7 +56,7 @@ use codec::FullCodec; use frame_election_provider_support::{ScoreProvider, SortedListProvider}; use frame_system::ensure_signed; -use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded}; +use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded, StaticLookup}; use sp_std::prelude::*; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -90,6 +90,8 @@ macro_rules! log { }; } +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -222,8 +224,9 @@ pub mod pallet { /// /// If `dislocated` does not exists, it returns an error. #[pallet::weight(T::WeightInfo::rebag_non_terminal().max(T::WeightInfo::rebag_terminal()))] - pub fn rebag(origin: OriginFor, dislocated: T::AccountId) -> DispatchResult { + pub fn rebag(origin: OriginFor, dislocated: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; + let dislocated = T::Lookup::lookup(dislocated)?; let current_score = T::ScoreProvider::score(&dislocated); let _ = Pallet::::do_rebag(&dislocated, current_score) .map_err::, _>(Into::into)?; @@ -239,8 +242,12 @@ pub mod pallet { /// - both nodes are within the same bag, /// - and `origin` has a greater `Score` than `lighter`. 
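The bags-list hunk above (and the `put_in_front_of` hunk that follows) changes dispatchable arguments from a bare `T::AccountId` to `AccountIdLookupOf<T>`: callers encode the account via `StaticLookup::unlookup`, and the call resolves it with `T::Lookup::lookup` on entry. A minimal, self-contained sketch of that round trip, using `IdentityLookup` for illustration (this is not the pallet code itself):

use sp_runtime::traits::{IdentityLookup, StaticLookup};

fn main() {
    // `IdentityLookup` is the simplest `StaticLookup`: Source == Target.
    type Lookup = IdentityLookup<u64>;

    let account: u64 = 42;
    // What the updated benchmarks do before invoking the call:
    let source = <Lookup as StaticLookup>::unlookup(account);
    // What `rebag`/`put_in_front_of` now do on entry, before touching the list:
    let resolved = <Lookup as StaticLookup>::lookup(source).expect("identity lookup cannot fail");
    assert_eq!(resolved, account);
}

In a real runtime `T::Lookup` is typically `AccountIdLookup`, so the same call can accept an address or index and decoding happens at dispatch time.
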
#[pallet::weight(T::WeightInfo::put_in_front_of())] - pub fn put_in_front_of(origin: OriginFor, lighter: T::AccountId) -> DispatchResult { + pub fn put_in_front_of( + origin: OriginFor, + lighter: AccountIdLookupOf, + ) -> DispatchResult { let heavier = ensure_signed(origin)?; + let lighter = T::Lookup::lookup(lighter)?; List::::put_in_front_of(&lighter, &heavier) .map_err::, _>(Into::into) .map_err::(Into::into) diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index a77beb23bd667..49b7d136125e2 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -21,7 +21,6 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_election_provider_support::ScoreProvider; use frame_support::traits::OnRuntimeUpgrade; -use sp_runtime::traits::Zero; #[cfg(feature = "try-runtime")] use frame_support::ensure; diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index a554a9bd4ad1a..049967e3024c0 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bags_list. @@ -57,18 +57,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - (55_040_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_040_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - (53_671_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(53_671_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -76,9 +76,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - (56_410_000 as Weight) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(56_410_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } } @@ -89,18 +89,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - (55_040_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_040_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking 
Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - (53_671_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(53_671_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -108,8 +108,8 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - (56_410_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(10 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(56_410_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } } diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 4a874e4ffa1d5..206adba0f044b 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -46,7 +46,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { @@ -60,7 +60,7 @@ benchmarks_instance_pallet! { transfer_best_case { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -80,7 +80,7 @@ benchmarks_instance_pallet! { transfer_keep_alive { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -95,7 +95,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always creates an account. set_balance_creating { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); @@ -110,7 +110,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always kills an account. set_balance_killing { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. 
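Most of the churn in this diff, across the `weights.rs` files for assets, bags-list, and balances, follows one mechanical pattern: `Weight` stops being a plain `u64` alias, so bare literals are wrapped in `Weight::from_ref_time(.. as RefTimeWeight)` and scalar multiplication goes through `scalar_saturating_mul`. A hedged sketch of the before/after shape (the constants are illustrative, not the benchmarked values):

use frame_support::weights::{RefTimeWeight, Weight};

// Old shape, when `Weight` was a `u64` alias:
//     (41_860_000 as Weight)
//         .saturating_add((5_000 as Weight).saturating_mul(n as Weight))
// New shape, with `Weight` as an opaque struct:
fn example_weight(n: u32) -> Weight {
    Weight::from_ref_time(41_860_000 as RefTimeWeight)
        // per-unit cost is now wrapped in `Weight` and multiplied by a scalar
        .saturating_add(
            Weight::from_ref_time(5_000 as RefTimeWeight)
                .scalar_saturating_mul(n as RefTimeWeight),
        )
}

The db-read/write terms change the same way, since `T::DbWeight::get().reads(..)`/`.writes(..)` now take a `RefTimeWeight` and return a `Weight`.
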
let existential_deposit = T::ExistentialDeposit::get(); @@ -127,7 +127,7 @@ benchmarks_instance_pallet! { force_transfer { let existential_deposit = T::ExistentialDeposit::get(); let source: T::AccountId = account("source", 0, SEED); - let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + let source_lookup = T::Lookup::unlookup(source.clone()); // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); @@ -135,7 +135,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount) verify { @@ -160,7 +160,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); // Create a bunch of users in storage. @@ -182,7 +182,7 @@ benchmarks_instance_pallet! { transfer_all { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); @@ -196,7 +196,7 @@ benchmarks_instance_pallet! 
{ force_unreserve { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 683ebce2b1693..0a45350366449 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -192,6 +192,8 @@ pub use weights::WeightInfo; pub use pallet::*; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -275,7 +277,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -303,7 +305,7 @@ pub mod pallet { )] pub fn set_balance( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, #[pallet::compact] new_free: T::Balance, #[pallet::compact] new_reserved: T::Balance, ) -> DispatchResultWithPostInfo { @@ -353,8 +355,8 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, - source: ::Source, - dest: ::Source, + source: AccountIdLookupOf, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -378,7 +380,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -407,7 +409,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_all())] pub fn transfer_all( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, keep_alive: bool, ) -> DispatchResult { use fungible::Inspect; @@ -425,7 +427,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_unreserve())] pub fn force_unreserve( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, amount: T::Balance, ) -> DispatchResult { ensure_root(origin)?; @@ -998,6 +1000,8 @@ impl, I: 'static> Pallet { /// Is a no-op if: /// - the value to be moved is zero; or /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. + /// + /// NOTE: returns actual amount of transferred value in `Ok` case. fn do_transfer_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, @@ -1011,7 +1015,7 @@ impl, I: 'static> Pallet { if slashed == beneficiary { return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), } } @@ -1783,6 +1787,8 @@ where /// Unreserve some funds, returning any amount that was unable to be unreserved. /// /// Is a no-op if the value to be unreserved is zero or the account does not exist. + /// + /// NOTE: returns amount value which wasn't successfully unreserved. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { return Zero::zero() diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 8f5470ae3cac2..6605af530563d 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -188,14 +188,14 @@ macro_rules! 
decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, )); @@ -206,14 +206,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); }); @@ -528,6 +528,22 @@ macro_rules! decl_tests { }); } + #[test] + fn transferring_reserved_balance_to_yourself_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 50, Status::Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 60, Status::Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + }); + } + #[test] fn transferring_reserved_balance_to_nonexistent_should_fail() { <$ext_builder>::default().build().execute_with(|| { @@ -1167,6 +1183,25 @@ macro_rules! decl_tests { }); } + #[test] + fn reserved_named_to_yourself_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 50, Status::Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 60, Status::Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + }); + } + #[test] fn ensure_reserved_named_should_work() { <$ext_builder>::default().build().execute_with(|| { diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 4ab913cf1411a..266c37063d2f8 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -46,7 +46,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 6f4c50d90153a..ebb82f41a545c 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -34,7 +34,7 @@ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where + pub struct Test where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, @@ -47,7 +47,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 4c028840d553c..fbfd62eb63259 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index f612d31997996..17b2541e0a998 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_balances. @@ -58,45 +58,45 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (41_860_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_860_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (32_760_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_760_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (22_279_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_279_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (25_488_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_488_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (42_190_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(42_190_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (37_789_000 
as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(37_789_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - (20_056_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(20_056_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -104,44 +104,44 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (41_860_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_860_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (32_760_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_760_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (22_279_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_279_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (25_488_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_488_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (42_190_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(42_190_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (37_789_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(37_789_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - (20_056_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(20_056_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index d9cd8c8a5d8c8..eaa50004ae848 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -44,16 +44,12 @@ pub fn beefy_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) } -fn offchain_key(pos: usize) -> Vec { - (::INDEXING_PREFIX, pos as u64).encode() -} - -fn read_mmr_leaf(ext: &mut TestExternalities, index: usize) -> MmrLeaf { +fn read_mmr_leaf(ext: &mut TestExternalities, key: Vec) -> MmrLeaf { type Node = pallet_mmr::primitives::DataOrHash; ext.persist_offchain_overlay(); let offchain_db = ext.offchain_db(); offchain_db - .get(&offchain_key(index)) + .get(&key) .map(|d| Node::decode(&mut &*d).unwrap()) .map(|n| match n { Node::Data(d) => d, @@ -105,12 +101,17 @@ fn should_contain_mmr_digest() { #[test] fn should_contain_valid_leaf_data() { + fn node_offchain_key(parent_hash: H256, pos: usize) -> Vec { + (::INDEXING_PREFIX, parent_hash, pos as u64).encode() + } + let mut ext = new_test_ext(vec![1, 2, 3, 4]); - ext.execute_with(|| { + let parent_hash = ext.execute_with(|| { init_block(1); + >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, 0); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 0)); assert_eq!( mmr_leaf, MmrLeaf { @@ -128,11 +129,12 @@ fn should_contain_valid_leaf_data() { ); // build second block on top - ext.execute_with(|| { + let parent_hash = ext.execute_with(|| { init_block(2); + >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, 1); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 1)); assert_eq!( mmr_leaf, MmrLeaf { diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 4205274b5dbc3..c098eee528233 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -23,6 +23,7 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../../primitives/runtime-interface" } @@ -31,6 +32,7 @@ sp-storage = { version = "6.0.0", default-features = false, path = "../../primit [dev-dependencies] hex-literal = "0.3.4" +rusty-fork = { version = "0.3.0", default-features = false } sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } [features] @@ -45,6 +47,7 @@ std = [ "serde", "sp-api/std", "sp-application-crypto/std", + "sp-core/std", "sp-io/std", "sp-runtime-interface/std", "sp-runtime/std", diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index f0fe05cc140f2..6316cd5903c8b 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -125,6 +125,8 @@ cargo test -p pallet-balances --features runtime-benchmarks > ``` > To solve this, navigate to the folder of the node (`cd bin/node/cli`) or pallet (`cd frame/pallet`) and run the command 
there. +This will instance each linear component with different values. The number of values per component is set to six and can be changed with the `VALUES_PER_COMPONENT` environment variable. + ## Adding Benchmarks The benchmarks included with each pallet are not automatically added to your node. To actually diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index afd53915cc397..18472595f15b9 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -38,6 +38,8 @@ pub use log; #[doc(hidden)] pub use paste; #[doc(hidden)] +pub use sp_core::defer; +#[doc(hidden)] pub use sp_io::storage::root as storage_root; #[doc(hidden)] pub use sp_runtime::traits::Zero; @@ -545,7 +547,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: _ $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -555,7 +557,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: _ ( $origin $( , $arg )* ) + $name { $( $code )* }: _ $(<$origin_type>)? ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -568,7 +570,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -578,7 +580,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) + $name { $( $code )* }: $dispatch $(<$origin_type>)? ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -591,7 +593,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $eval:block + $name:ident { $( $code:tt )* }: $(<$origin_type:ty>)? $eval:block $( $rest:tt )* ) => { $crate::benchmarks_iter!( @@ -601,7 +603,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $eval + $name { $( $code )* }: $(<$origin_type>)? $eval verify { } $( $rest )* ); @@ -615,7 +617,7 @@ macro_rules! to_origin { $origin.into() }; ($origin:expr, $origin_type:ty) => { - >::from($origin) + <::Origin as From<$origin_type>>::from($origin) }; } @@ -1033,6 +1035,9 @@ macro_rules! impl_benchmark { // Always do at least one internal repeat... for _ in 0 .. internal_repeats.max(1) { + // Always reset the state after the benchmark. + $crate::defer!($crate::benchmarking::wipe_db()); + // Set up the externalities environment for the setup we want to // benchmark. let closure_to_benchmark = < @@ -1054,7 +1059,9 @@ macro_rules! impl_benchmark { // Time the extrinsic logic. $crate::log::trace!( target: "benchmark", - "Start Benchmark: {:?}", c + "Start Benchmark: {} ({:?})", + extrinsic, + c ); let start_pov = $crate::benchmarking::proof_size(); @@ -1108,9 +1115,6 @@ macro_rules! impl_benchmark { proof_size: diff_pov, keys: read_and_written_keys, }); - - // Wipe the DB back to the genesis state. - $crate::benchmarking::wipe_db(); } return Ok(results); @@ -1149,6 +1153,8 @@ macro_rules! 
impl_benchmark { // This creates a unit test for one benchmark of the main benchmark macro. // It runs the benchmark using the `high` and `low` value for each component // and ensure that everything completes successfully. +// Instances each component with six values which can be controlled with the +// env variable `VALUES_PER_COMPONENT`. #[macro_export] #[doc(hidden)] macro_rules! impl_benchmark_test { @@ -1173,6 +1179,9 @@ macro_rules! impl_benchmark_test { let execute_benchmark = | c: $crate::Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), $crate::BenchmarkError> { + // Always reset the state after the benchmark. + $crate::defer!($crate::benchmarking::wipe_db()); + // Set up the benchmark, return execution + verification function. let closure_to_verify = < SelectedBenchmark as $crate::BenchmarkingSetup @@ -1184,27 +1193,48 @@ macro_rules! impl_benchmark_test { } // Run execution + verification - closure_to_verify()?; - - // Reset the state - $crate::benchmarking::wipe_db(); - - Ok(()) + closure_to_verify() }; if components.is_empty() { execute_benchmark(Default::default())?; } else { - for (name, low, high) in components.iter() { - // Test only the low and high value, assuming values in the middle - // won't break - for component_value in $crate::vec![low, high] { + let num_values: u32 = if let Ok(ev) = std::env::var("VALUES_PER_COMPONENT") { + ev.parse().map_err(|_| { + $crate::BenchmarkError::Stop( + "Could not parse env var `VALUES_PER_COMPONENT` as u32." + ) + })? + } else { + 6 + }; + + if num_values < 2 { + return Err("`VALUES_PER_COMPONENT` must be at least 2".into()); + } + + for (name, low, high) in components.clone().into_iter() { + // Test the lowest, highest (if its different from the lowest) + // and up to num_values-2 more equidistant values in between. + // For 0..10 and num_values=6 this would mean: [0, 2, 4, 6, 8, 10] + + let mut values = $crate::vec![low]; + let diff = (high - low).min(num_values - 1); + let slope = (high - low) as f32 / diff as f32; + + for i in 1..=diff { + let value = ((low as f32 + slope * i as f32) as u32) + .clamp(low, high); + values.push(value); + } + + for component_value in values { // Select the max value for all the other components. let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components .iter() .map(|(n, _, h)| - if n == name { - (*n, *component_value) + if *n == name { + (*n, component_value) } else { (*n, *h) } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 06f2b5bdc4916..b8a888767bf2c 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -27,6 +27,7 @@ use sp_runtime::{ BuildStorage, }; use sp_std::prelude::*; +use std::cell::RefCell; #[frame_support::pallet] mod pallet_test { @@ -125,11 +126,20 @@ fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig::default().build_storage().unwrap().into() } +thread_local! { + /// Tracks the used components per value. Needs to be a thread local since the + /// benchmarking clears the storage after each run. + static VALUES_PER_COMPONENT: RefCell> = RefCell::new(vec![]); +} + +// NOTE: This attribute is only needed for the `modify_in_` functions. 
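+// Worked example of the value selection in `impl_benchmark_test` above: for a
+// component ranging over `0 .. 10` with the default `VALUES_PER_COMPONENT`
+// of 6, `diff = min(10 - 0, 6 - 1) = 5` and `slope = (10 - 0) / 5 = 2.0`,
+// so the generated unit test runs the benchmark with the component values
+// [0, 2, 4, 6, 8, 10], matching the expectations in `test_values_per_component` below.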
+#[allow(unreachable_code)] mod benchmarks { - use super::{new_test_ext, pallet_test::Value, Test}; + use super::{new_test_ext, pallet_test::Value, Test, VALUES_PER_COMPONENT}; use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; use frame_support::{assert_err, assert_ok, ensure, traits::Get}; use frame_system::RawOrigin; + use rusty_fork::rusty_fork_test; use sp_std::prelude::*; // Additional used internally by the benchmark macro. @@ -227,6 +237,31 @@ mod benchmarks { // This should never be reached. assert!(value > 100); } + + modify_in_setup_then_error { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + }: { } + + modify_in_call_then_error { + }: { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + } + + modify_in_verify_then_error { + }: { + } verify { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + } + + // Stores all component values in the thread-local storage. + values_per_component { + let n in 0 .. 10; + }: { + VALUES_PER_COMPONENT.with(|v| v.borrow_mut().push(n)); + } } #[test] @@ -350,4 +385,70 @@ mod benchmarks { assert_eq!(Pallet::::test_benchmark_skip_benchmark(), Err(BenchmarkError::Skip),); }); } + + /// An error return of a benchmark test function still causes the db to be wiped. + #[test] + fn benchmark_error_wipes_storage() { + new_test_ext().execute_with(|| { + // It resets when the error happens in the setup: + assert_err!( + Pallet::::test_benchmark_modify_in_setup_then_error(), + "Should error" + ); + assert_eq!(Value::::get(), None); + + // It resets when the error happens in the call: + assert_err!(Pallet::::test_benchmark_modify_in_call_then_error(), "Should error"); + assert_eq!(Value::::get(), None); + + // It resets when the error happens in the verify: + assert_err!( + Pallet::::test_benchmark_modify_in_verify_then_error(), + "Should error" + ); + assert_eq!(Value::::get(), None); + }); + } + + rusty_fork_test! { + /// Test that the benchmarking uses the correct values for each component and + /// that the number of components can be controlled with `VALUES_PER_COMPONENT`. + /// + /// NOTE: This test needs to run in its own process, since it + /// otherwise messes up the env variable for the other tests. + #[test] + fn test_values_per_component() { + let tests = vec![ + (Some("1"), Err("`VALUES_PER_COMPONENT` must be at least 2".into())), + (Some("asdf"), Err("Could not parse env var `VALUES_PER_COMPONENT` as u32.".into())), + (None, Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("2"), Ok(vec![0, 10])), + (Some("4"), Ok(vec![0, 3, 6, 10])), + (Some("6"), Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("10"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10])), + (Some("11"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + (Some("99"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + ]; + + for (num, expected) in tests { + run_test_values_per_component(num, expected); + } + } + } + + /// Helper for [`test_values_per_component`]. 
+ fn run_test_values_per_component(num: Option<&str>, output: Result, BenchmarkError>) { + VALUES_PER_COMPONENT.with(|v| v.borrow_mut().clear()); + match num { + Some(n) => std::env::set_var("VALUES_PER_COMPONENT", n), + None => std::env::remove_var("VALUES_PER_COMPONENT"), + } + + new_test_ext().execute_with(|| { + let got = Pallet::::test_benchmark_values_per_component() + .map(|_| VALUES_PER_COMPONENT.with(|v| v.borrow().clone())); + + assert_eq!(got, output); + }); + } } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 8c642f74358db..b483208e3ef69 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -127,7 +127,7 @@ pub struct BenchmarkResult { impl BenchmarkResult { pub fn from_weight(w: Weight) -> Self { - Self { extrinsic_time: (w as u128) / 1_000, ..Default::default() } + Self { extrinsic_time: (w.ref_time() / 1_000) as u128, ..Default::default() } } } diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 8b36601940cf3..dc2ca26e1af02 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_benchmarking. @@ -58,75 +58,75 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn addition(_i: u32, ) -> Weight { - (103_000 as Weight) + Weight::from_ref_time(103_000 as RefTimeWeight) } fn subtraction(_i: u32, ) -> Weight { - (105_000 as Weight) + Weight::from_ref_time(105_000 as RefTimeWeight) } fn multiplication(_i: u32, ) -> Weight { - (113_000 as Weight) + Weight::from_ref_time(113_000 as RefTimeWeight) } fn division(_i: u32, ) -> Weight { - (102_000 as Weight) + Weight::from_ref_time(102_000 as RefTimeWeight) } fn hashing(_i: u32, ) -> Weight { - (20_865_902_000 as Weight) + Weight::from_ref_time(20_865_902_000 as RefTimeWeight) } fn sr25519_verification(i: u32, ) -> Weight { - (319_000 as Weight) + Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } 
// For backwards compatibility and tests impl WeightInfo for () { fn addition(_i: u32, ) -> Weight { - (103_000 as Weight) + Weight::from_ref_time(103_000 as RefTimeWeight) } fn subtraction(_i: u32, ) -> Weight { - (105_000 as Weight) + Weight::from_ref_time(105_000 as RefTimeWeight) } fn multiplication(_i: u32, ) -> Weight { - (113_000 as Weight) + Weight::from_ref_time(113_000 as RefTimeWeight) } fn division(_i: u32, ) -> Weight { - (102_000 as Weight) + Weight::from_ref_time(102_000 as RefTimeWeight) } fn hashing(_i: u32, ) -> Weight { - (20_865_902_000 as Weight) + Weight::from_ref_time(20_865_902_000 as RefTimeWeight) } fn sr25519_verification(i: u32, ) -> Weight { - (319_000 as Weight) + Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 7566c32f6e9a1..0c8c875dc611c 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -37,7 +37,8 @@ fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), &'st setup_bounty::(i, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin, bounty_id)?; } ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) @@ -61,20 +62,16 @@ fn setup_bounty, I: 'static>( } fn create_bounty, I: 'static>( -) -> Result<(::Source, BountyIndex), &'static str> { +) -> Result<(AccountIdLookupOf, BountyIndex), &'static str> { let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator( - RawOrigin::Root.into(), - 
bounty_id, - curator_lookup.clone(), - fee, - )?; + Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup.clone(), fee)?; Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) } @@ -100,7 +97,8 @@ benchmarks_instance_pallet! { let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - }: _(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: _(approve_origin, bounty_id) propose_curator { setup_pot_account::(); @@ -108,9 +106,11 @@ benchmarks_instance_pallet! { let curator_lookup = T::Lookup::unlookup(curator); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin, bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: _(approve_origin, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { @@ -128,9 +128,10 @@ benchmarks_instance_pallet! { let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { @@ -169,14 +170,16 @@ benchmarks_instance_pallet! { let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: close_bounty(approve_origin, bounty_id) close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; Treasury::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: close_bounty(approve_origin, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index bd98b8b8b0b19..971769cd50e2d 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -114,6 +114,8 @@ type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf /// An index of a bounty. Just a `u32`. pub type BountyIndex = u32; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + /// A bounty proposal. 
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -381,7 +383,7 @@ pub mod pallet { pub fn propose_curator( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - curator: ::Source, + curator: AccountIdLookupOf, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; @@ -390,7 +392,7 @@ pub mod pallet { Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; match bounty.status { - BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, + BountyStatus::Funded => {}, _ => return Err(Error::::UnexpectedStatus.into()), }; @@ -553,7 +555,7 @@ pub mod pallet { pub fn award_bounty( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs index 8f5f3ebe55bf4..2f81c97127bcd 100644 --- a/frame/bounties/src/migrations/v4.rs +++ b/frame/bounties/src/migrations/v4.rs @@ -54,7 +54,7 @@ pub fn migrate< target: "runtime::bounties", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0 + return Weight::zero() } let on_chain_storage_version =
<P as GetStorageVersion>
::on_chain_storage_version(); @@ -105,7 +105,7 @@ pub fn migrate< "Attempted to apply migration to v4 but failed because storage version is {:?}", on_chain_storage_version, ); - 0 + Weight::zero() } } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index ff220600794d4..b4ce039b35fbc 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -1126,8 +1126,8 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); - System::set_block_number(3); - >::on_initialize(3); + System::set_block_number(4); + >::on_initialize(4); assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); @@ -1150,8 +1150,8 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); - System::set_block_number(3); - >::on_initialize(3); + System::set_block_number(6); + >::on_initialize(6); assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index d3e054cfc6351..27a23cb4ffeae 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bounties. 
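// A minimal sketch of the origin change made in the bounties benchmarks
// above: instead of hard-coding `RawOrigin::Root`, a benchmark now asks the
// configured `ApproveOrigin` for an origin that is guaranteed to pass its
// check, so runtimes whose approval origin is not root can still run the
// benchmarks. The trait below is a simplified stand-in for
// `frame_support::traits::EnsureOrigin`, not the real definition.
trait EnsureOriginSketch<OuterOrigin> {
	// Returns an origin for which the corresponding `ensure_origin` check
	// is guaranteed to succeed.
	fn successful_origin() -> OuterOrigin;
}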
@@ -65,88 +65,88 @@ impl WeightInfo for SubstrateWeight { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - (28_903_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_903_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - (10_997_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(10_997_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - (8_967_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_967_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (28_665_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_665_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (25_141_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(25_141_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - (21_295_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_295_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - (67_951_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_951_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - (33_654_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - 
.saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_654_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - (50_582_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_582_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - (18_322_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_322_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } } @@ -157,87 +157,87 @@ impl WeightInfo for () { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - (28_903_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_903_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - (10_997_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(10_997_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - (8_967_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_967_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (28_665_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_665_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (25_141_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(25_141_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - (21_295_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_295_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - (67_951_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_951_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - (33_654_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_654_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - (50_582_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_582_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - (18_322_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_322_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 
w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } } diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index dcb54361fac89..ca5af50276b9d 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -221,7 +221,7 @@ benchmarks! { unassign_curator { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Bounties::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(T::BlockNumber::zero()); frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_setup.bounty_id, @@ -295,7 +295,7 @@ benchmarks! { close_child_bounty_active { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Bounties::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(T::BlockNumber::zero()); }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id) verify { assert_last_event::(Event::Canceled { diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 4f25fdcf8903a..5f396cd2d4567 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -80,6 +80,7 @@ pub use pallet::*; type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -315,7 +316,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - curator: ::Source, + curator: AccountIdLookupOf, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; @@ -574,7 +575,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index 2584445071471..f42715f14bbee 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -60,7 +60,7 @@ frame_support::construct_runtime!( ); parameter_types! 
{ - pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockWeight: Weight = Weight::from_ref_time(1024); pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index ad08e00149a30..c5d00d6ed0bd4 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_child_bounties. @@ -64,51 +64,51 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - (51_064_000 as Weight) + Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - (15_286_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(15_286_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (29_929_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_929_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (32_449_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_449_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - (23_793_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(23_793_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 
w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - (67_529_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_529_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -117,9 +117,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - (48_436_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(48_436_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -128,9 +128,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - (58_044_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(58_044_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } } @@ -143,51 +143,51 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - (51_064_000 as Weight) + Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - (15_286_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(15_286_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (29_929_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_929_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight 
{ - (32_449_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_449_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - (23_793_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(23_793_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - (67_529_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_529_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -196,9 +196,9 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - (48_436_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(48_436_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -207,8 +207,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - (58_044_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(58_044_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } } diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index 076afcd203030..b80a4aef28d38 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -355,7 +355,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -436,7 +436,7 @@ benchmarks_instance_pallet! 
{ index, approve, )?; - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -511,7 +511,7 @@ benchmarks_instance_pallet! { assert_eq!(Collective::::proposals().len(), p as usize); // Prime nay will close it as disapproved - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); @@ -583,7 +583,7 @@ benchmarks_instance_pallet! { assert_eq!(Collective::::proposals().len(), p as usize); // Prime aye will close it as approved - }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs index 4e6cd05584138..483c3f9fa9e69 100644 --- a/frame/collective/src/migrations/v4.rs +++ b/frame/collective/src/migrations/v4.rs @@ -45,7 +45,7 @@ pub fn migrate::on_chain_storage_version(); @@ -70,7 +70,7 @@ pub fn migrate::WrongProposalWeight ); assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); @@ -301,7 +307,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { Origin::signed(4), hash, 0, - proposal_weight - 100, + proposal_weight - Weight::from_ref_time(100), proposal_len )); }) @@ -687,7 +693,11 @@ fn correct_validate_and_get_proposal() { Error::::WrongProposalLength ); assert_noop!( - Collective::validate_and_get_proposal(&hash, length, weight - 10), + Collective::validate_and_get_proposal( + &hash, + length, + weight - Weight::from_ref_time(10) + ), Error::::WrongProposalWeight ); let res = Collective::validate_and_get_proposal(&hash, length, weight); @@ -1196,18 +1206,18 @@ fn close_disapprove_does_not_care_about_weight_or_len() { assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); // It will not close with bad weight/len information assert_noop!( - Collective::close(Origin::signed(2), hash, 0, 0, 0), + Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0), Error::::WrongProposalLength, ); assert_noop!( - Collective::close(Origin::signed(2), hash, 0, 0, proposal_len), + Collective::close(Origin::signed(2), hash, 0, Weight::zero(), proposal_len), Error::::WrongProposalWeight, ); // Now we make the proposal fail assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); // It can close even if the weight/len information is bad - assert_ok!(Collective::close(Origin::signed(2), hash, 0, 0, 0)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0)); }) } diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 2f5c6f590a999..a0cc64cc2408d 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use 
frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_collective. @@ -64,36 +64,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - (16_819_000 as Weight) + Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - (18_849_000 as Weight) + Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -101,52 +101,52 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (22_204_000 as Weight) + Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((8_000 as 
Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - (30_941_000 as Weight) + Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (32_485_000 as Weight) + Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (33_487_000 as Weight) + Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members 
(r:1 w:0) @@ -154,13 +154,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - (33_494_000 as Weight) + Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -168,25 +168,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (36_566_000 as Weight) + Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - (20_159_000 as Weight) + Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -197,36 +197,36 @@ impl WeightInfo for () { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) - 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - (16_819_000 as Weight) + Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - (18_849_000 as Weight) + Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -234,52 +234,52 @@ impl WeightInfo for () { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (22_204_000 as Weight) + Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - (30_941_000 as Weight) + Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - 
.saturating_add((77_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (32_485_000 as Weight) + Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (33_487_000 as Weight) + Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -287,13 +287,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - (33_494_000 as Weight) + Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: 
Council Members (r:1 w:0) @@ -301,24 +301,24 @@ impl WeightInfo for () { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (36_566_000 as Weight) + Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - (20_159_000 as Weight) + Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index d27801df33bda..ac85c469354fe 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -26,6 +26,7 @@ smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } wasmi-validation = { version = "0.4", default-features = false } +impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate random contract code rand = { version = "0.8", optional = true, default-features = false } diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat index db7e83fd96b42..7cc7335052e90 100644 --- a/frame/contracts/fixtures/chain_extension.wat +++ b/frame/contracts/fixtures/chain_extension.wat @@ -15,12 +15,12 @@ ) ;; [0, 4) len of input output - (data (i32.const 0) "\02") + (data (i32.const 0) "\08") ;; [4, 12) buffer for input - ;; [12, 16) len of output buffer - (data (i32.const 12) "\02") + ;; [12, 48) len of output buffer + (data (i32.const 12) "\20") ;; [16, inf) buffer for output @@ -31,15 +31,15 @@ ;; the chain extension passes through the input and returns it as output (call $seal_call_chain_extension - (i32.load8_u (i32.const 4)) ;; func_id + (i32.load (i32.const 4)) ;; id (i32.const 4) ;; input_ptr (i32.load (i32.const 0)) ;; input_len (i32.const 16) ;; output_ptr (i32.const 12) ;; output_len_ptr ) - ;; the chain extension passes through the func_id - (call $assert (i32.eq (i32.load8_u (i32.const 4)))) + ;; the chain extension passes through the id + (call $assert (i32.eq (i32.load (i32.const 4)))) (call $seal_return (i32.const 0) (i32.const 16) (i32.load (i32.const 12))) ) diff --git 
a/frame/contracts/fixtures/chain_extension_temp_storage.wat b/frame/contracts/fixtures/chain_extension_temp_storage.wat new file mode 100644 index 0000000000000..b481abb5bc7c9 --- /dev/null +++ b/frame/contracts/fixtures/chain_extension_temp_storage.wat @@ -0,0 +1,85 @@ +;; Call the chain extension twice with the specified func_ids +;; It then calls itself once +(module + (import "seal0" "seal_call_chain_extension" + (func $seal_call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) + ) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_address" (func $seal_address (param i32 i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 16 16)) + + (func $assert (param i32) + (block $ok + (br_if $ok (get_local 0)) + (unreachable) + ) + ) + + ;; [0, 4) len of input buffer: 8 bytes (func_ids) + 1 byte (stop_recurse) + (data (i32.const 0) "\09") + + ;; [4, 16) buffer for input + + ;; [16, 48) buffer for self address + + ;; [48, 52) len of self address buffer + (data (i32.const 48) "\20") + + (func (export "deploy")) + + (func (export "call") + ;; input: (func_id1: i32, func_id2: i32, stop_recurse: i8) + (call $seal_input (i32.const 4) (i32.const 0)) + + (call $seal_call_chain_extension + (i32.load (i32.const 4)) ;; id + (i32.const 0) ;; input_ptr + (i32.const 0) ;; input_len + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; output_len_ptr + ) + drop + + (call $seal_call_chain_extension + (i32.load (i32.const 8)) ;; _id + (i32.const 0) ;; input_ptr + (i32.const 0) ;; input_len + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; output_len_ptr + ) + drop + + (if (i32.eqz (i32.load8_u (i32.const 12))) + (then + ;; stop recursion + (i32.store8 (i32.const 12) (i32.const 1)) + + ;; load own address into buffer + (call $seal_address (i32.const 16) (i32.const 48)) + + ;; call function 2 + 3 of chainext 3 next time + ;; (3 << 16) | 2 + ;; (3 << 16) | 3 + (i32.store (i32.const 4) (i32.const 196610)) + (i32.store (i32.const 8) (i32.const 196611)) + + ;; call self + (call $seal_call + (i32.const 8) ;; Set ALLOW_REENTRY + (i32.const 16) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 512) ;; Pointer to the buffer with value to transfer + (i32.const 4) ;; Pointer to input data buffer address + (i32.load (i32.const 0)) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + + ;; check that the self-call succeeded + (call $assert (i32.eqz)) + ) + (else) + ) + ) +) diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index dca29c805cec4..65c13bb1fc607 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -15,16 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Proc macros used in the contracts module. +//! Procedural macros used in the contracts module. +//! +//! Most likely you should use the [`#[define_env]`][`macro@define_env`] attribute macro which hides +//! the boilerplate of defining an external environment for a wasm module.
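An aside on the constants 196610 and 196611 stored by the fixture above: `seal_call_chain_extension` now takes a single `u32` id whose high half selects the extension and whose low half selects the function within it. A minimal sketch of that packing (the helper name `make_id` is ours, purely illustrative):

```rust
/// Pack a chain extension id (high two bytes) and a function id (low two
/// bytes) into the single `u32` passed to `seal_call_chain_extension`.
fn make_id(ext_id: u16, func_id: u16) -> u32 {
    ((ext_id as u32) << 16) | func_id as u32
}

fn main() {
    // The ids the fixture stores before calling itself:
    assert_eq!(make_id(3, 2), 196_610); // (3 << 16) | 2
    assert_eq!(make_id(3, 3), 196_611); // (3 << 16) | 3
}
```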
#![no_std] extern crate alloc; -use alloc::string::ToString; +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; use proc_macro2::TokenStream; -use quote::{quote, quote_spanned}; -use syn::{parse_macro_input, Data, DeriveInput, Ident}; +use quote::{quote, quote_spanned, ToTokens}; +use syn::{parse_macro_input, spanned::Spanned, Data, DeriveInput, Ident}; /// This derives `Debug` for a struct where each field must be of some numeric type. /// It interprets each field as if it represents some weight and formats it as times so that @@ -85,7 +92,7 @@ fn derive_debug( /// This is only used when the `full` feature is activated. #[cfg(feature = "full")] fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { - use syn::{spanned::Spanned, Fields}; + use syn::Fields; match &data.fields { Fields::Named(fields) => { @@ -140,3 +147,392 @@ fn format_default(field: &Ident) -> TokenStream { &self.#field } } + +/// Parsed environment definition. +struct EnvDef { + host_funcs: Vec<HostFn>, +} + +/// Parsed host function definition. +struct HostFn { + item: syn::ItemFn, + module: String, + name: String, + returns: HostFnReturn, +} + +enum HostFnReturn { + Unit, + U32, + ReturnCode, +} + +impl ToTokens for HostFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.item.to_tokens(tokens); + } +} + +impl HostFn { + pub fn try_from(item: syn::Item) -> syn::Result<Self> { + let err = |span, msg| { + let msg = format!("Invalid host function definition. {}", msg); + syn::Error::new(span, msg) + }; + let msg = "only #[version(<u8>)] or #[unstable] attribute is allowed."; + let span = item.span(); + let item = match item { + syn::Item::Fn(i_fn) => Ok(i_fn), + _ => Err(err(span, msg)), + }?; + + let name = item.sig.ident.to_string(); + let attrs: Vec<&syn::Attribute> = + item.attrs.iter().filter(|m| !m.path.is_ident("doc")).collect(); + + let module = match attrs.len() { + 0 => Ok("seal0".to_string()), + 1 => { + let attr = &attrs[0]; + let ident = attr.path.get_ident().ok_or(err(span, msg))?.to_string(); + match ident.as_str() { + "version" => { + let ver: syn::LitInt = attr.parse_args()?; + Ok(format!("seal{}", ver.base10_parse::<u8>().map_err(|_| err(span, msg))?)) + }, + "unstable" => Ok("__unstable__".to_string()), + _ => Err(err(span, msg)), + } + }, + _ => Err(err(span, msg)), + }?; + + let msg = r#"Should return one of the following: + - Result<(), TrapReason>, + - Result<u32, TrapReason>, + - Result<ReturnCode, TrapReason>"#; + + let ret_ty = match item.clone().sig.output { + syn::ReturnType::Type(_, ty) => Ok(ty.clone()), + _ => Err(err(span, &msg)), + }?; + + match *ret_ty { + syn::Type::Path(tp) => { + let result = &tp.path.segments.last().ok_or(err(span, &msg))?; + let (id, span) = (result.ident.to_string(), result.ident.span()); + id.eq(&"Result".to_string()).then_some(()).ok_or(err(span, &msg))?; + + match &result.arguments { + syn::PathArguments::AngleBracketed(group) => { + if group.args.len() != 2 { + return Err(err(span, &msg)) + }; + + let arg2 = group.args.last().ok_or(err(span, &msg))?; + + let err_ty = match arg2 { + syn::GenericArgument::Type(ty) => Ok(ty.clone()), + _ => Err(err(arg2.span(), &msg)), + }?; + + match err_ty { + syn::Type::Path(tp) => Ok(tp + .path + .segments + .first() + .ok_or(err(arg2.span(), &msg))? + .ident + .to_string()), + _ => Err(err(tp.span(), &msg)), + }?
+ .eq("TrapReason") + .then_some(()) + .ok_or(err(span, &msg))?; + + let arg1 = group.args.first().ok_or(err(span, &msg))?; + let ok_ty = match arg1 { + syn::GenericArgument::Type(ty) => Ok(ty.clone()), + _ => Err(err(arg1.span(), &msg)), + }?; + let ok_ty_str = match ok_ty { + syn::Type::Path(tp) => Ok(tp + .path + .segments + .first() + .ok_or(err(arg1.span(), &msg))? + .ident + .to_string()), + syn::Type::Tuple(tt) => { + if !tt.elems.is_empty() { + return Err(err(arg1.span(), &msg)) + }; + Ok("()".to_string()) + }, + _ => Err(err(ok_ty.span(), &msg)), + }?; + + let returns = match ok_ty_str.as_str() { + "()" => Ok(HostFnReturn::Unit), + "u32" => Ok(HostFnReturn::U32), + "ReturnCode" => Ok(HostFnReturn::ReturnCode), + _ => Err(err(arg1.span(), &msg)), + }?; + Ok(Self { item, module, name, returns }) + }, + _ => Err(err(span, &msg)), + } + }, + _ => Err(err(span, &msg)), + } + } + + fn to_wasm_sig(&self) -> TokenStream { + let args = self.item.sig.inputs.iter().skip(1).filter_map(|a| match a { + syn::FnArg::Typed(pt) => Some(&pt.ty), + _ => None, + }); + let returns = match &self.returns { + HostFnReturn::U32 => quote! { vec![ <u32>::VALUE_TYPE ] }, + HostFnReturn::ReturnCode => quote! { vec![ <ReturnCode>::VALUE_TYPE ] }, + HostFnReturn::Unit => quote! { vec![] }, + }; + + quote! { + wasm_instrument::parity_wasm::elements::FunctionType::new( + vec! [ #(<#args>::VALUE_TYPE),* ], + #returns, + ) + } + } +} +impl EnvDef { + pub fn try_from(item: syn::ItemMod) -> syn::Result<Self> { + let span = item.span(); + let err = |msg| syn::Error::new(span, msg); + let items = &item + .content + .as_ref() + .ok_or(err("Invalid environment definition, expected `mod` to be inlined."))? + .1; + + let host_funcs = items + .iter() + .map(|i| HostFn::try_from(i.clone())) + .collect::<Result<Vec<_>, _>>()?; + + Ok(Self { host_funcs }) + } +} + +/// Expands the environment definition. +/// Should generate source code for: +/// - wasm import satisfy checks (see `expand_can_satisfy()`); +/// - implementations of the host functions to be added to the wasm runtime environment (see +/// `expand_impls()`). +fn expand_env(def: &mut EnvDef) -> proc_macro2::TokenStream { + let can_satisfy = expand_can_satisfy(def); + let impls = expand_impls(def); + + quote! { + pub struct Env; + #can_satisfy + #impls + } +} + +/// Generates the `can_satisfy()` method for every host function, to be used to check +/// these functions against the expected module, name and signatures when importing them from a wasm +/// module. +fn expand_can_satisfy(def: &mut EnvDef) -> proc_macro2::TokenStream { + let checks = def.host_funcs.iter().map(|f| { + let (module, name, signature) = (&f.module, &f.name, &f.to_wasm_sig()); + quote! { + if module == #module.as_bytes() + && name == #name.as_bytes() + && signature == &#signature + { + return true; + } + } + }); + let satisfy_checks = quote! { + #( #checks )* + }; + + quote! { + impl crate::wasm::env_def::ImportSatisfyCheck for Env { + fn can_satisfy( + module: &[u8], + name: &[u8], + signature: &wasm_instrument::parity_wasm::elements::FunctionType, + ) -> bool { + use crate::wasm::env_def::ConvertibleToWasm; + #[cfg(not(feature = "unstable-interface"))] + if module == b"__unstable__" { + return false; + } + #satisfy_checks + return false; + } + } + } +} + +/// Generates an implementation for every host function, to register it in the contract execution +/// environment.
+fn expand_impls(def: &mut EnvDef) -> proc_macro2::TokenStream { + let impls = def.host_funcs.iter().map(|f| { + let params = &f.item.sig.inputs.iter().skip(1).map(|arg| { + match arg { + syn::FnArg::Typed(pt) => { + if let syn::Pat::Ident(ident) = &*pt.pat { + let p_type = &pt.ty; + let p_name = ident.ident.clone(); + quote! { + let #p_name : <#p_type as crate::wasm::env_def::ConvertibleToWasm>::NativeType = + args.next() + .and_then(|v| <#p_type as crate::wasm::env_def::ConvertibleToWasm>::from_typed_value(v.clone())) + .expect( + "precondition: all imports should be checked against the signatures of corresponding + functions defined by `#[define_env]` proc macro by the user of the macro; + thus this can never be `None`; + qed;" + ); + } + } else { quote! { } } + }, + _ => quote! { }, + } + }); + + let outline = match &f.returns { + HostFnReturn::Unit => quote! { + body().map_err(|reason| { + ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Unit); + }, + _ => quote! { + let r = body().map_err(|reason| { + ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Value({ + r.to_typed_value() + })); + }, + }; + let params = params.clone(); + let (module, name, ident, body) = (&f.module, &f.name, &f.item.sig.ident, &f.item.block); + let unstable_feat = match module.as_str() { + "__unstable__" => quote! { #[cfg(feature = "unstable-interface")] }, + _ => quote! { }, + }; + quote! { + #unstable_feat + f(#module.as_bytes(), #name.as_bytes(), { + fn #ident<E: Ext>( + ctx: &mut crate::wasm::Runtime<E>, + args: &[sp_sandbox::Value], + ) -> Result<sp_sandbox::ReturnValue, sp_sandbox::HostError> + where + <E::T as frame_system::Config>::AccountId: sp_core::crypto::UncheckedFrom<<E::T as frame_system::Config>::Hash> + + AsRef<[u8]>, + { + #[allow(unused)] + let mut args = args.iter(); + let mut body = || { + #( #params )* + #body + }; + #outline + } + #ident::<E> + }); + } + }); + + let packed_impls = quote! { + #( #impls )* + }; + + quote! { + impl<E: Ext> crate::wasm::env_def::FunctionImplProvider<E> for Env + where + <E::T as frame_system::Config>::AccountId: + sp_core::crypto::UncheckedFrom<<E::T as frame_system::Config>::Hash> + AsRef<[u8]>, + { + fn impls<F: FnMut(&[u8], &[u8], crate::wasm::env_def::HostFunc<E>)>(f: &mut F) { + #packed_impls + } + } + } +} + +/// Defines a set of host functions that can be imported by contract wasm code. +/// +/// **NB**: Be advised that all functions defined by this macro +/// will panic if called with unexpected arguments. +/// +/// It's up to you as the user of this macro to check signatures of wasm code to be executed +/// and reject the code if any imported function has a mismatched signature. +/// +/// ## Example +/// +/// ```nocompile +/// #[define_env] +/// pub mod some_env { +/// fn some_host_fn(ctx: Runtime<E: Ext>, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<(), TrapReason> { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// } +/// ``` +/// This example will expand to the `some_host_fn()` defined in the wasm module named `seal0`.
+/// To define a host function in the `seal1` and `__unstable__` modules, it should be annotated with the +/// appropriate attribute as follows: +/// +/// ## Example +/// +/// ```nocompile +/// #[define_env] +/// pub mod some_env { +/// #[version(1)] +/// fn some_host_fn(ctx: Runtime<E: Ext>, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<u32, TrapReason> { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// +/// #[unstable] +/// fn some_host_fn(ctx: Runtime<E: Ext>, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<u32, TrapReason> { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// } +/// ``` +/// +/// Only the following return types are allowed for the host functions defined with the macro: +/// - `Result<(), TrapReason>`, +/// - `Result<u32, TrapReason>`, +/// - `Result<ReturnCode, TrapReason>`. +/// +/// The macro expands to a `pub struct Env` declaration, with the following trait implementations: +/// - `pallet_contracts::wasm::env_def::ImportSatisfyCheck` +/// - `pallet_contracts::wasm::env_def::FunctionImplProvider` +#[proc_macro_attribute] +pub fn define_env( + attr: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + if !attr.is_empty() { + let msg = "Invalid `define_env` attribute macro: expected no attributes: `#[define_env]`."; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } + + let item = syn::parse_macro_input!(item as syn::ItemMod); + + match EnvDef::try_from(item) { + Ok(mut def) => expand_env(&mut def).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index a81abef9f37ca..7876c7cba40d0 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } serde = { version = "1", features = ["derive"] } # Substrate Dependencies diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 0df8f90237ed3..1df7a5753f77e 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -229,7 +229,7 @@ where call_request; let value: Balance = decode_hex(value, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let gas_limit: u64 = decode_hex(gas_limit, "weight")?; let storage_deposit_limit: Option<Balance> = storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; limit_gas(gas_limit)?; @@ -259,7 +259,7 @@ where } = instantiate_request; let value: Balance = decode_hex(value, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let gas_limit: u64 = decode_hex(gas_limit, "weight")?; let storage_deposit_limit: Option<Balance> = storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; limit_gas(gas_limit)?; diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index bea469bd0f5a9..e29cb51728e1e 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -57,7 +57,7 @@ const INSTR_BENCHMARK_BATCHES: u32 = 50; struct Contract<T: Config> { caller: T::AccountId, account_id: T::AccountId, - addr: <T::Lookup as StaticLookup>::Source, + addr: AccountIdLookupOf<T>, value: BalanceOf<T>, } @@ -2853,8 +2853,8 @@ benchmarks!
{ println!("{:#?}", Schedule::::default()); println!("###############################################"); println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", - weight_limit / weight_per_key, - (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key, + weight_limit / weight_per_key.ref_time(), + (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key.ref_time(), ); } #[cfg(not(feature = "std"))] diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index ed447719933be..d0e0cf5cf95cb 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -29,6 +29,22 @@ //! required for this endeavour are defined or re-exported in this module. There is an //! implementation on `()` which can be used to signal that no chain extension is available. //! +//! # Using multiple chain extensions +//! +//! Often there is a need for having multiple chain extensions. This is often the case when +//! some generally useful off-the-shelf extensions should be included. To have multiple chain +//! extensions they can be put into a tuple which is then passed to [`Config::ChainExtension`] like +//! this `type Extensions = (ExtensionA, ExtensionB)`. +//! +//! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple. +//! This is because the [`RegisteredChainExtension::ID`] is used to decide which of those extensions +//! should be used when the contract calls a chain extensions. Extensions which are generally +//! useful should claim their `ID` with [the registry](https://github.com/paritytech/chainextension-registry) +//! so that no collisions with other vendors will occur. +//! +//! **Chain specific extensions must use the reserved `ID = 0` so that they can't be registered with +//! the registry.** +//! //! # Security //! //! The chain author alone is responsible for the security of the chain extension. @@ -68,7 +84,6 @@ pub use crate::{exec::Ext, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_primitives::ReturnFlags; pub use sp_core::crypto::UncheckedFrom; -pub use state::Init as InitState; /// Result that returns a [`DispatchError`] on error. pub type Result = sp_std::result::Result; @@ -78,6 +93,12 @@ pub type Result = sp_std::result::Result; /// In order to create a custom chain extension this trait must be implemented and supplied /// to the pallet contracts configuration trait as the associated type of the same name. /// Consult the [module documentation](self) for a general explanation of chain extensions. +/// +/// # Lifetime +/// +/// The extension will be [`Default`] initialized at the beginning of each call +/// (**not** per call stack) and dropped afterwards. Hence any value held inside the extension +/// can be used as a per-call scratch buffer. pub trait ChainExtension { /// Call the chain extension logic. /// @@ -86,8 +107,6 @@ pub trait ChainExtension { /// imported wasm function. /// /// # Parameters - /// - `func_id`: The first argument to `seal_call_chain_extension`. Usually used to determine - /// which function to realize. /// - `env`: Access to the remaining arguments and the execution environment. /// /// # Return @@ -95,7 +114,7 @@ pub trait ChainExtension { /// In case of `Err` the contract execution is immediately suspended and the passed error /// is returned to the caller. Otherwise the value of [`RetVal`] determines the exit /// behaviour. 
- fn call<E>(func_id: u32, env: Environment<E, InitState>) -> Result<RetVal> + fn call<E>(&mut self, env: Environment<E, InitState>) -> Result<RetVal> where E: Ext, <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>; @@ -112,20 +131,51 @@ pub trait ChainExtension<C: Config> { } } -/// Implementation that indicates that no chain extension is available. -impl<C: Config> ChainExtension<C> for () { - fn call<E>(_func_id: u32, mut _env: Environment<E, InitState>) -> Result<RetVal> +/// A [`ChainExtension`] that can be composed with other extensions using a tuple. +/// +/// An extension that implements this trait can be put in a tuple in order to have multiple +/// extensions available. The tuple implementation routes requests based on the two +/// most significant bytes of the `id` passed to `call`. +/// +/// If this extension is to be used by multiple runtimes, consider +/// [registering it](https://github.com/paritytech/chainextension-registry) to ensure that there +/// are no collisions with other vendors. +/// +/// # Note +/// +/// Currently, we support tuples of up to ten registered chain extensions. If more chain extensions +/// are needed consider opening an issue. +pub trait RegisteredChainExtension<C: Config>: ChainExtension<C> { + /// The extension's globally unique identifier. + const ID: u16; +} + +#[impl_trait_for_tuples::impl_for_tuples(10)] +#[tuple_types_custom_trait_bound(RegisteredChainExtension<C>)] +impl<C: Config> ChainExtension<C> for Tuple { + fn call<E>(&mut self, mut env: Environment<E, InitState>) -> Result<RetVal> where E: Ext, <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, { - // Never called since [`Self::enabled()`] is set to `false`. Because we want to - // avoid panics at all costs we supply a sensible error value here instead - // of an `unimplemented!`. + for_tuples!( + #( + if (Tuple::ID == env.ext_id()) && Tuple::enabled() { + return Tuple.call(env); + } + )* + ); Err(Error::<E::T>::NoChainExtension.into()) } fn enabled() -> bool { + for_tuples!( + #( + if Tuple::enabled() { + return true; + } + )* + ); false } } @@ -147,7 +197,7 @@ pub enum RetVal { /// /// It uses [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html) /// to enforce the correct usage of the parameters passed to the chain extension. -pub struct Environment<'a, 'b, E: Ext, S: state::State> { +pub struct Environment<'a, 'b, E: Ext, S: State> { /// The actual data of this type. inner: Inner<'a, 'b, E>, /// `S` is only used in the type system but never as value. @@ -155,10 +205,26 @@ pub struct Environment<'a, 'b, E: Ext, S: state::State> { } /// Functions that are available in every state of this type. -impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: State> Environment<'a, 'b, E, S> where <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, { + /// The function id within the `id` passed by a contract. + /// + /// It returns the two least significant bytes of the `id` passed by a contract as the other + /// two bytes represent the chain extension itself (the code which is calling this function). + pub fn func_id(&self) -> u16 { + (self.inner.id & 0x0000FFFF) as u16 + } + + /// The chain extension id within the `id` passed by a contract. + /// + /// It returns the two most significant bytes of the `id` passed by a contract which represent + /// the chain extension itself (the code which is calling this function). + pub fn ext_id(&self) -> u16 { + (self.inner.id >> 16) as u16 + } + /// Charge the passed `amount` of weight from the overall limit.
/// /// It returns `Ok` when the remaining weight budget is larger than the passed @@ -172,7 +238,7 @@ /// /// Weight is synonymous with gas in substrate. pub fn charge_weight(&mut self, amount: Weight) -> Result<ChargedAmount> { - self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount)) + self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount.ref_time())) } /// Adjust a previously charged amount down to its actual amount. /// /// This is used when a maximum a priori amount was charged and then should be partially refunded to match the actual amount. pub fn adjust_weight(&mut self, charged: ChargedAmount, actual_weight: Weight) { self.inner .runtime - .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) + .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight.ref_time())) } /// Grants access to the execution environment of the current contract call. @@ -197,42 +263,43 @@ where /// /// Those are the functions that determine how the arguments to the chain extensions /// should be consumed. -impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { +impl<'a, 'b, E: Ext> Environment<'a, 'b, E, InitState> { /// Creates a new environment for consumption by a chain extension. /// /// It is only available to this crate because only the wasm runtime module needs to /// ever create this type. Chain extensions merely consume it. pub(crate) fn new( runtime: &'a mut Runtime<'b, E>, + id: u32, input_ptr: u32, input_len: u32, output_ptr: u32, output_len_ptr: u32, ) -> Self { Environment { - inner: Inner { runtime, input_ptr, input_len, output_ptr, output_len_ptr }, + inner: Inner { runtime, id, input_ptr, input_len, output_ptr, output_len_ptr }, phantom: PhantomData, } } /// Use all arguments as integer values. - pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { + pub fn only_in(self) -> Environment<'a, 'b, E, OnlyInState> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input arguments as integer and output arguments as pointer to a buffer. - pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { + pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, PrimInBufOutState> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input and output arguments as pointers to a buffer. - pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { + pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, BufInBufOutState> { Environment { inner: self.inner, phantom: PhantomData } } } /// Functions to use the input arguments as integers. -impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: PrimIn> Environment<'a, 'b, E, S> { /// The `input_ptr` argument. pub fn val0(&self) -> u32 { self.inner.input_ptr @@ -245,7 +312,7 @@ impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { } /// Functions to use the output arguments as integers. -impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: PrimOut> Environment<'a, 'b, E, S> { /// The `output_ptr` argument. pub fn val2(&self) -> u32 { self.inner.output_ptr @@ -258,7 +325,7 @@ impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { } /// Functions to use the input arguments as pointer to a buffer. -impl<'a, 'b, E: Ext, S: state::BufIn> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: BufIn> Environment<'a, 'b, E, S> where <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, { @@ -321,7 +388,7 @@ where } /// Functions to use the output arguments as pointer to a buffer.
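The `state::*` → public `State` change in these hunks is easier to see in miniature. Below is a standalone sketch of the sealed typestate idea, with stand-in types rather than the pallet's API: accessors only exist after an explicit state transition, and the private `sealed` module stops downstream crates from adding their own states.

```rust
use core::marker::PhantomData;

mod sealed {
    pub trait Sealed {}
    impl Sealed for super::InitState {}
    impl Sealed for super::BufInState {}
}

pub trait State: sealed::Sealed {}

pub enum InitState {}
pub enum BufInState {}
impl State for InitState {}
impl State for BufInState {}

pub struct Environment<S: State> {
    input: Vec<u8>,
    _state: PhantomData<S>,
}

impl Environment<InitState> {
    pub fn new(input: Vec<u8>) -> Self {
        Environment { input, _state: PhantomData }
    }

    /// Consume the initial state; only now does `read` become available.
    pub fn buf_in(self) -> Environment<BufInState> {
        Environment { input: self.input, _state: PhantomData }
    }
}

impl Environment<BufInState> {
    pub fn read(&self) -> &[u8] {
        &self.input
    }
}

fn main() {
    let env = Environment::new(vec![1, 2, 3]);
    // env.read(); // does not compile: wrong state
    assert_eq!(env.buf_in().read(), &[1, 2, 3]);
}
```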
-impl<'a, 'b, E: Ext, S: state::BufOut> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: BufOut> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -344,7 +411,8 @@ where buffer, allow_skip, |len| { - weight_per_byte.map(|w| RuntimeCosts::ChainExtension(w.saturating_mul(len.into()))) + weight_per_byte + .map(|w| RuntimeCosts::ChainExtension(w.ref_time().saturating_mul(len.into()))) }, ) } @@ -359,6 +427,8 @@ struct Inner<'a, 'b, E: Ext> { /// The runtime contains all necessary functions to interact with the running contract. runtime: &'a mut Runtime<'b, E>, /// Verbatim argument passed to `seal_call_chain_extension`. + id: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. input_ptr: u32, /// Verbatim argument passed to `seal_call_chain_extension`. input_len: u32, @@ -368,31 +438,54 @@ struct Inner<'a, 'b, E: Ext> { output_len_ptr: u32, } -/// Private submodule with public types to prevent other modules from naming them. -mod state { - pub trait State {} - - pub trait PrimIn: State {} - pub trait PrimOut: State {} - pub trait BufIn: State {} - pub trait BufOut: State {} - - /// The initial state of an [`Environment`](`super::Environment`). - /// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). - pub enum Init {} - pub enum OnlyIn {} - pub enum PrimInBufOut {} - pub enum BufInBufOut {} - - impl State for Init {} - impl State for OnlyIn {} - impl State for PrimInBufOut {} - impl State for BufInBufOut {} - - impl PrimIn for OnlyIn {} - impl PrimOut for OnlyIn {} - impl PrimIn for PrimInBufOut {} - impl BufOut for PrimInBufOut {} - impl BufIn for BufInBufOut {} - impl BufOut for BufInBufOut {} +/// Any state of an [`Environment`] implements this trait. +/// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). +pub trait State: sealed::Sealed {} + +/// A state that uses primitive inputs. +pub trait PrimIn: State {} + +/// A state that uses primitive outputs. +pub trait PrimOut: State {} + +/// A state that uses a buffer as input. +pub trait BufIn: State {} + +/// A state that uses a buffer as output. +pub trait BufOut: State {} + +/// The initial state of an [`Environment`]. +pub enum InitState {} + +/// A state that uses all arguments as primitive inputs. +pub enum OnlyInState {} + +/// A state that uses two arguments as primitive inputs and the other two as buffer output. +pub enum PrimInBufOutState {} + +/// Uses a buffer for input and a buffer for output. +pub enum BufInBufOutState {} + +mod sealed { + use super::*; + + /// Trait to prevent users from implementing `State` for anything else. 
+ pub trait Sealed {} + + impl Sealed for InitState {} + impl Sealed for OnlyInState {} + impl Sealed for PrimInBufOutState {} + impl Sealed for BufInBufOutState {} + + impl State for InitState {} + impl State for OnlyInState {} + impl State for PrimInBufOutState {} + impl State for BufInBufOutState {} + + impl PrimIn for OnlyInState {} + impl PrimOut for OnlyInState {} + impl PrimIn for PrimInBufOutState {} + impl BufOut for PrimInBufOutState {} + impl BufIn for BufInBufOutState {} + impl BufOut for BufInBufOutState {} } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index ec7e1e9335182..81e1aef92cbfc 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -664,7 +664,7 @@ where debug_message: Option<&'a mut Vec>, ) -> Result<(Self, E), ExecError> { let (first_frame, executable, nonce) = - Self::new_frame(args, value, gas_meter, storage_meter, 0, schedule)?; + Self::new_frame(args, value, gas_meter, storage_meter, Weight::zero(), schedule)?; let stack = Self { origin, schedule, @@ -797,7 +797,7 @@ where )?; } - // Every call or instantiate also optionally transferres balance. + // Every non delegate call or instantiate also optionally transfers the balance. self.initial_transfer()?; // Call into the wasm blob. @@ -959,8 +959,14 @@ where // The transfer as performed by a call or instantiate. fn initial_transfer(&self) -> DispatchResult { let frame = self.top_frame(); - let value = frame.value_transferred; + // If it is a delegate call, then we've already transferred tokens in the + // last non-delegate frame. + if frame.delegate_caller.is_some() { + return Ok(()) + } + + let value = frame.value_transferred; Self::transfer(ExistenceRequirement::KeepAlive, self.caller(), &frame.account_id, value) } @@ -1083,7 +1089,7 @@ where delegated_call: Some(DelegatedCall { executable, caller: self.caller().clone() }), }, value, - 0, + Weight::zero(), )?; self.run(executable, input_data) } @@ -1309,7 +1315,7 @@ where fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( - &*topics, + &topics, ::Event::from(event).into(), ) } @@ -1410,12 +1416,7 @@ mod tests { loader.counter += 1; loader.map.insert( hash, - MockExecutable { - func: Rc::new(f), - func_type, - code_hash: hash.clone(), - refcount: 1, - }, + MockExecutable { func: Rc::new(f), func_type, code_hash: hash, refcount: 1 }, ); hash }) @@ -1560,6 +1561,82 @@ mod tests { }); } + #[test] + fn correct_transfer_on_call() { + let origin = ALICE; + let dest = BOB; + let value = 55; + + let success_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&dest, success_ch); + set_balance(&origin, 100); + let balance = get_balance(&dest); + let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), 55).unwrap(); + + let _ = MockStack::run_call( + origin.clone(), + dest.clone(), + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + value, + vec![], + None, + ) + .unwrap(); + + assert_eq!(get_balance(&origin), 100 - value); + assert_eq!(get_balance(&dest), balance + value); + }); + } + + #[test] + fn correct_transfer_on_delegate_call() { + let origin = ALICE; + let dest = BOB; + let value = 35; + + let success_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + Ok(ExecReturnValue { flags: 
ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + let delegate_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&dest, delegate_ch); + set_balance(&origin, 100); + let balance = get_balance(&dest); + let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), 55).unwrap(); + + let _ = MockStack::run_call( + origin.clone(), + dest.clone(), + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + value, + vec![], + None, + ) + .unwrap(); + + assert_eq!(get_balance(&origin), 100 - value); + assert_eq!(get_balance(&dest), balance + value); + }); + } + #[test] fn changes_are_reverted_on_failing_call() { // This test verifies that changes are reverted on a call which fails (or equally, returns @@ -1748,7 +1825,7 @@ mod tests { let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. - let r = ctx.ext.call(0, BOB, 0, vec![], true); + let r = ctx.ext.call(Weight::zero(), BOB, 0, vec![], true); REACHED_BOTTOM.with(|reached_bottom| { let mut reached_bottom = reached_bottom.borrow_mut(); @@ -1803,7 +1880,7 @@ mod tests { .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. - assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -1934,7 +2011,7 @@ mod tests { // ALICE is the origin of the call stack assert!(ctx.ext.caller_is_origin()); // BOB calls CHARLIE - ctx.ext.call(0, CHARLIE, 0, vec![], true) + ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true) }); ExtBuilder::default().build().execute_with(|| { @@ -1964,7 +2041,7 @@ mod tests { assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. - assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2107,14 +2184,13 @@ mod tests { let dummy_ch = MockLoader::insert(Call, |_, _| exec_success()); let instantiated_contract_address = Rc::new(RefCell::new(None::>)); let instantiator_ch = MockLoader::insert(Call, { - let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. let (address, output) = ctx .ext .instantiate( - 0, + Weight::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2170,12 +2246,11 @@ mod tests { fn instantiation_traps() { let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); let instantiator_ch = MockLoader::insert(Call, { - let dummy_ch = dummy_ch.clone(); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
assert_matches!( ctx.ext.instantiate( - 0, + Weight::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2267,13 +2342,13 @@ mod tests { let info = ctx.ext.contract_info(); assert_eq!(info.storage_deposit, 0); info.storage_deposit = 42; - assert_eq!(ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped()); + assert_eq!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), exec_trapped()); assert_eq!(ctx.ext.contract_info().storage_deposit, 42); } exec_success() }); let code_charlie = MockLoader::insert(Call, |ctx, _| { - assert!(ctx.ext.call(0, BOB, 0, vec![99], true).is_ok()); + assert!(ctx.ext.call(Weight::zero(), BOB, 0, vec![99], true).is_ok()); exec_trapped() }); @@ -2302,7 +2377,7 @@ mod tests { fn recursive_call_during_constructor_fails() { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( - ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), + ctx.ext.call(Weight::zero(), ctx.ext.address().clone(), 0, vec![], true), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2404,7 +2479,7 @@ mod tests { // call the contract passed as input with disabled reentry let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); - ctx.ext.call(0, dest, 0, vec![], false) + ctx.ext.call(Weight::zero(), dest, 0, vec![], false) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -2449,7 +2524,7 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call(0, CHARLIE, 0, vec![], false) + ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], false) } else { exec_success() } @@ -2457,7 +2532,7 @@ mod tests { // call BOB with input set to '1' let code_charlie = - MockLoader::insert(Call, |ctx, _| ctx.ext.call(0, BOB, 0, vec![1], true)); + MockLoader::insert(Call, |ctx, _| ctx.ext.call(Weight::zero(), BOB, 0, vec![1], true)); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2620,18 +2695,30 @@ mod tests { let success_code = MockLoader::insert(Constructor, |_, _| exec_success()); let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| { ctx.ext - .instantiate(0, fail_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .instantiate( + Weight::zero(), + fail_code, + ctx.ext.minimum_balance() * 100, + vec![], + &[], + ) .ok(); exec_success() }); let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| { let (account_id, _) = ctx .ext - .instantiate(0, success_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .instantiate( + Weight::zero(), + success_code, + ctx.ext.minimum_balance() * 100, + vec![], + &[], + ) .unwrap(); // a plain call should not influence the account counter - ctx.ext.call(0, account_id, 0, vec![], false).unwrap(); + ctx.ext.call(Weight::zero(), account_id, 0, vec![], false).unwrap(); exec_success() }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 41df125da0170..ae20e4eeb0def 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,7 +107,7 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == 0 { self.gas_left } else { amount }; + let amount = if amount == Weight::zero() { self.gas_left } else { amount }; // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. 
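The sentinel rule encoded in `nested` above (a zero amount means "inherit all remaining gas") in a runnable miniature; `Weight` here is a stand-in newtype, not frame_support's type:

```rust
// Standalone model of `GasMeter::nested`: passing Weight::zero() hands the
// entire remaining budget to the nested meter.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Weight(u64);

impl Weight {
    fn zero() -> Self {
        Weight(0)
    }
    fn saturating_sub(self, rhs: Self) -> Self {
        Weight(self.0.saturating_sub(rhs.0))
    }
}

struct GasMeter {
    gas_left: Weight,
}

impl GasMeter {
    fn nested(&mut self, amount: Weight) -> GasMeter {
        // Zero is the sentinel for "all remaining gas".
        let amount = if amount == Weight::zero() { self.gas_left } else { amount };
        self.gas_left = self.gas_left.saturating_sub(amount);
        GasMeter { gas_left: amount }
    }
}

fn main() {
    let mut parent = GasMeter { gas_left: Weight(100) };
    let child = parent.nested(Weight::zero());
    assert_eq!(child.gas_left, Weight(100)); // child inherited the full budget
    assert_eq!(parent.gas_left, Weight(0));
}
```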
@@ -121,7 +121,7 @@ /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left == 0 { + if self.gas_left == Weight::zero() { // All of the remaining gas was inherited by the nested gas meter. When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. @@ -157,7 +157,7 @@ where } let amount = token.weight(); - let new_value = self.gas_left.checked_sub(amount); + let new_value = self.gas_left.checked_sub(&amount); // We always consume the gas even if there is not enough gas. self.gas_left = new_value.unwrap_or_else(Zero::zero); @@ -227,7 +227,7 @@ where #[cfg(test)] mod tests { - use super::{GasMeter, Token}; + use super::{GasMeter, Token, Weight}; use crate::tests::Test; /// A simple utility macro that helps to match against a @@ -271,20 +271,20 @@ mod tests { #[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); impl Token<Test> for SimpleToken { - fn weight(&self) -> u64 { - self.0 + fn weight(&self) -> Weight { + Weight::from_ref_time(self.0) } } #[test] fn it_works() { - let gas_meter = GasMeter::<Test>::new(50000); - assert_eq!(gas_meter.gas_left(), 50000); + let gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(50000)); + assert_eq!(gas_meter.gas_left(), Weight::from_ref_time(50000)); } #[test] fn tracing() { - let mut gas_meter = GasMeter::<Test>::new(50000); + let mut gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(50000)); assert!(!gas_meter.charge(SimpleToken(1)).is_err()); let mut tokens = gas_meter.tokens().iter(); @@ -294,7 +294,7 @@ mod tests { // This test makes sure that nothing can be executed if there is no gas. #[test] fn refuse_to_execute_anything_if_zero() { - let mut gas_meter = GasMeter::<Test>::new(0); + let mut gas_meter = GasMeter::<Test>::new(Weight::zero()); assert!(gas_meter.charge(SimpleToken(1)).is_err()); } @@ -305,7 +305,7 @@ // if the gas meter runs out of gas. However, this is just a nice property to have. #[test] fn overcharge_is_unrecoverable() { - let mut gas_meter = GasMeter::<Test>::new(200); + let mut gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(200)); // The first charge should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); @@ -318,7 +318,7 @@ // possible. #[test] fn charge_exact_amount() { - let mut gas_meter = GasMeter::<Test>::new(25); + let mut gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(25)); assert!(!gas_meter.charge(SimpleToken(25)).is_err()); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 60b30ffa25005..ee0db17ade95b 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -110,7 +110,7 @@ use frame_support::{ dispatch::Dispatchable, ensure, traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, - weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, Weight}, + weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, Weight}, BoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -136,6 +136,7 @@ type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; type CodeVec<T> = BoundedVec<u8, <T as Config>::MaxCodeLen>; type RelaxedCodeVec<T> = BoundedVec<u8, <T as Config>::RelaxedMaxCodeLen>; +type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source; /// Used as a sentinel value when reading and writing contract memory.
/// @@ -213,7 +214,7 @@ impl<T: Config, const P: u32> Get<Weight> for DefaultContractAccessWe .get(DispatchClass::Normal) .max_total .unwrap_or(block_weights.max_block) / - Weight::from(P) + RefTimeWeight::from(P) } } @@ -280,7 +281,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// Type that allows the runtime authors to add new host functions for a contract to call. - type ChainExtension: chain_extension::ChainExtension<Self>; + type ChainExtension: chain_extension::ChainExtension<Self> + Default; /// Cost schedule and limits. #[pallet::constant] @@ -435,7 +436,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor<T>, - dest: <T::Lookup as StaticLookup>::Source, + dest: AccountIdLookupOf<T>, #[pallet::compact] value: BalanceOf<T>, #[pallet::compact] gas_limit: Weight, storage_deposit_limit: Option<<BalanceOf<T> as codec::HasCompact>::Type>, @@ -617,7 +618,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_code())] pub fn set_code( origin: OriginFor<T>, - dest: <T::Lookup as StaticLookup>::Source, + dest: AccountIdLookupOf<T>, code_hash: CodeHash<T>, ) -> DispatchResult { ensure_root(origin)?; @@ -872,8 +873,8 @@ where ); ContractExecResult { result: output.result.map_err(|r| r.error), - gas_consumed: output.gas_meter.gas_consumed(), - gas_required: output.gas_meter.gas_required(), + gas_consumed: output.gas_meter.gas_consumed().ref_time(), + gas_required: output.gas_meter.gas_required().ref_time(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -917,8 +918,8 @@ where .result .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) .map_err(|e| e.error), - gas_consumed: output.gas_meter.gas_consumed(), - gas_required: output.gas_meter.gas_required(), + gas_consumed: output.gas_meter.gas_consumed().ref_time(), + gas_required: output.gas_meter.gas_required().ref_time(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 19e699a855461..9d90cf38269b1 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -26,7 +26,7 @@ use sp_std::{marker::PhantomData, prelude::*}; /// Wrapper for all migrations of this pallet, based on `StorageVersion`.
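The version-gated accumulation pattern used by the `migrate` wrapper that follows, modeled standalone with stand-in types (the real code uses `StorageVersion` and frame_support's `Weight::new`; the step weights here are arbitrary placeholders):

```rust
// Version-gated migration accumulating an opaque Weight.
#[derive(Clone, Copy, Default)]
struct Weight(u64);

impl Weight {
    fn new() -> Self {
        Self::default()
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Weight(self.0.saturating_add(rhs.0))
    }
}

fn v4_migrate() -> Weight {
    Weight(10) // placeholder step weight
}
fn v5_migrate() -> Weight {
    Weight(20) // placeholder step weight
}

fn migrate(on_chain_version: u16) -> Weight {
    let mut weight = Weight::new();
    if on_chain_version < 4 {
        weight = weight.saturating_add(v4_migrate());
    }
    if on_chain_version < 5 {
        weight = weight.saturating_add(v5_migrate());
    }
    weight
}

fn main() {
    assert_eq!(migrate(3).0, 30); // runs both steps
    assert_eq!(migrate(4).0, 20); // only the v5 step remains
}
```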
pub fn migrate() -> Weight { let version = StorageVersion::get::>(); - let mut weight: Weight = 0; + let mut weight = Weight::new(); if version < 4 { weight = weight.saturating_add(v4::migrate::()); @@ -127,7 +127,7 @@ mod v5 { type DeletionQueue = StorageValue, Vec>; pub fn migrate() -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); >::translate(|_key, old: OldContractInfo| { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); @@ -216,7 +216,7 @@ mod v6 { type OwnerInfoOf = StorageMap, Identity, CodeHash, OwnerInfo>; pub fn migrate() -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); >::translate(|_key, old: OldContractInfo| { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 907ce9e088648..b5c80642a5356 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -21,7 +21,7 @@ use crate::{weights::WeightInfo, Config}; use codec::{Decode, Encode}; -use frame_support::{weights::Weight, DefaultNoBound}; +use frame_support::{weights::RefTimeWeight, DefaultNoBound}; use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -255,166 +255,166 @@ pub struct InstructionWeights { #[scale_info(skip_type_params(T))] pub struct HostFnWeights { /// Weight of calling `seal_caller`. - pub caller: Weight, + pub caller: RefTimeWeight, /// Weight of calling `seal_is_contract`. - pub is_contract: Weight, + pub is_contract: RefTimeWeight, /// Weight of calling `seal_code_hash`. - pub code_hash: Weight, + pub code_hash: RefTimeWeight, /// Weight of calling `seal_own_code_hash`. - pub own_code_hash: Weight, + pub own_code_hash: RefTimeWeight, /// Weight of calling `seal_caller_is_origin`. - pub caller_is_origin: Weight, + pub caller_is_origin: RefTimeWeight, /// Weight of calling `seal_address`. - pub address: Weight, + pub address: RefTimeWeight, /// Weight of calling `seal_gas_left`. - pub gas_left: Weight, + pub gas_left: RefTimeWeight, /// Weight of calling `seal_balance`. - pub balance: Weight, + pub balance: RefTimeWeight, /// Weight of calling `seal_value_transferred`. - pub value_transferred: Weight, + pub value_transferred: RefTimeWeight, /// Weight of calling `seal_minimum_balance`. - pub minimum_balance: Weight, + pub minimum_balance: RefTimeWeight, /// Weight of calling `seal_block_number`. - pub block_number: Weight, + pub block_number: RefTimeWeight, /// Weight of calling `seal_now`. - pub now: Weight, + pub now: RefTimeWeight, /// Weight of calling `seal_weight_to_fee`. - pub weight_to_fee: Weight, + pub weight_to_fee: RefTimeWeight, /// Weight of calling `gas`. - pub gas: Weight, + pub gas: RefTimeWeight, /// Weight of calling `seal_input`. - pub input: Weight, + pub input: RefTimeWeight, /// Weight per input byte copied to contract memory by `seal_input`. - pub input_per_byte: Weight, + pub input_per_byte: RefTimeWeight, /// Weight of calling `seal_return`. - pub r#return: Weight, + pub r#return: RefTimeWeight, /// Weight per byte returned through `seal_return`. - pub return_per_byte: Weight, + pub return_per_byte: RefTimeWeight, /// Weight of calling `seal_terminate`. - pub terminate: Weight, + pub terminate: RefTimeWeight, /// Weight of calling `seal_random`. - pub random: Weight, + pub random: RefTimeWeight, /// Weight of calling `seal_reposit_event`. 
- pub deposit_event: Weight, + pub deposit_event: RefTimeWeight, /// Weight per topic supplied to `seal_deposit_event`. - pub deposit_event_per_topic: Weight, + pub deposit_event_per_topic: RefTimeWeight, /// Weight per byte of an event deposited through `seal_deposit_event`. - pub deposit_event_per_byte: Weight, + pub deposit_event_per_byte: RefTimeWeight, /// Weight of calling `seal_debug_message`. - pub debug_message: Weight, + pub debug_message: RefTimeWeight, /// Weight of calling `seal_set_storage`. - pub set_storage: Weight, + pub set_storage: RefTimeWeight, /// Weight per written byte of an item stored with `seal_set_storage`. - pub set_storage_per_new_byte: Weight, + pub set_storage_per_new_byte: RefTimeWeight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. - pub set_storage_per_old_byte: Weight, + pub set_storage_per_old_byte: RefTimeWeight, /// Weight of calling `seal_set_code_hash`. - pub set_code_hash: Weight, + pub set_code_hash: RefTimeWeight, /// Weight of calling `seal_clear_storage`. - pub clear_storage: Weight, + pub clear_storage: RefTimeWeight, /// Weight of calling `seal_clear_storage` per byte of the stored item. - pub clear_storage_per_byte: Weight, + pub clear_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_contains_storage`. - pub contains_storage: Weight, + pub contains_storage: RefTimeWeight, /// Weight of calling `seal_contains_storage` per byte of the stored item. - pub contains_storage_per_byte: Weight, + pub contains_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_get_storage`. - pub get_storage: Weight, + pub get_storage: RefTimeWeight, /// Weight per byte of an item received via `seal_get_storage`. - pub get_storage_per_byte: Weight, + pub get_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_take_storage`. - pub take_storage: Weight, + pub take_storage: RefTimeWeight, /// Weight per byte of an item received via `seal_take_storage`. - pub take_storage_per_byte: Weight, + pub take_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_transfer`. - pub transfer: Weight, + pub transfer: RefTimeWeight, /// Weight of calling `seal_call`. - pub call: Weight, + pub call: RefTimeWeight, /// Weight of calling `seal_delegate_call`. - pub delegate_call: Weight, + pub delegate_call: RefTimeWeight, /// Weight surcharge that is claimed if `seal_call` does a balance transfer. - pub call_transfer_surcharge: Weight, + pub call_transfer_surcharge: RefTimeWeight, /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. - pub call_per_cloned_byte: Weight, + pub call_per_cloned_byte: RefTimeWeight, /// Weight of calling `seal_instantiate`. - pub instantiate: Weight, + pub instantiate: RefTimeWeight, /// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer. - pub instantiate_transfer_surcharge: Weight, + pub instantiate_transfer_surcharge: RefTimeWeight, /// Weight per salt byte supplied to `seal_instantiate`. - pub instantiate_per_salt_byte: Weight, + pub instantiate_per_salt_byte: RefTimeWeight, /// Weight of calling `seal_hash_sha2_256`. - pub hash_sha2_256: Weight, + pub hash_sha2_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_sha2_256`. - pub hash_sha2_256_per_byte: Weight, + pub hash_sha2_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_keccak_256`. - pub hash_keccak_256: Weight, + pub hash_keccak_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_keccak_256`.
- pub hash_keccak_256_per_byte: Weight, + pub hash_keccak_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_blake2_256`. - pub hash_blake2_256: Weight, + pub hash_blake2_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_blake2_256`. - pub hash_blake2_256_per_byte: Weight, + pub hash_blake2_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_blake2_128`. - pub hash_blake2_128: Weight, + pub hash_blake2_128: RefTimeWeight, /// Weight per byte hashed by `seal_hash_blake2_128`. - pub hash_blake2_128_per_byte: Weight, + pub hash_blake2_128_per_byte: RefTimeWeight, /// Weight of calling `seal_ecdsa_recover`. - pub ecdsa_recover: Weight, + pub ecdsa_recover: RefTimeWeight, /// Weight of calling `seal_ecdsa_to_eth_address`. - pub ecdsa_to_eth_address: Weight, + pub ecdsa_to_eth_address: RefTimeWeight, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -435,19 +435,19 @@ macro_rules! call_zero { macro_rules! cost_args { ($name:ident, $( $arg: expr ),+) => { - (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))) + (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))).ref_time() } } macro_rules! cost_batched_args { ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / Weight::from(API_BENCHMARK_BATCH_SIZE) + cost_args!($name, $( $arg ),+) / RefTimeWeight::from(API_BENCHMARK_BATCH_SIZE) } } macro_rules! cost_instr_no_params_with_batch_size { ($name:ident, $batch_size:expr) => { - (cost_args!($name, 1) / Weight::from($batch_size)) as u32 + (cost_args!($name, 1) / RefTimeWeight::from($batch_size)) as u32 }; } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index ffd8e441a8e00..7f1e3e96c4b86 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -28,7 +28,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchError, DispatchResult}, storage::child::{self, ChildInfo}, - weights::Weight, + weights::{RefTimeWeight, Weight}, }; use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; @@ -227,9 +227,11 @@ where let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - T::WeightInfo::on_initialize_per_queue_item(0); - let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - - T::WeightInfo::on_initialize_per_trie_key(0); - let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as Weight); + let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0)) + .ref_time(); + let decoding_weight = + weight_per_queue_item.scalar_saturating_mul(queue_len as RefTimeWeight); // `weight_per_key` being zero makes no sense and would constitute a failure to // benchmark properly. We opt for not removing any keys at all in this case. 
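Spelled out, the budget computation that this hunk and the next one rework is plain saturating arithmetic over one-dimensional ref-time values. A minimal sketch, assuming bare `u64` quantities (the pallet itself mixes `Weight` and `RefTimeWeight` as shown in the surrounding hunks):

// How many keys fit into the weight that remains after fixed costs are paid.
fn key_budget(weight_limit: u64, base_weight: u64, decoding_weight: u64, weight_per_key: u64) -> u32 {
    weight_limit
        .saturating_sub(base_weight)
        .saturating_sub(decoding_weight)
        // Division by a zero `weight_per_key` yields `None`, so a botched
        // benchmark falls back to removing no keys at all, as noted above.
        .checked_div(weight_per_key)
        .unwrap_or(0) as u32
}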
@@ -237,7 +239,8 @@ where .saturating_sub(base_weight) .saturating_sub(decoding_weight) .checked_div(weight_per_key) - .unwrap_or(0) as u32; + .unwrap_or(Weight::zero()) + .ref_time() as u32; (weight_per_key, key_budget) } @@ -248,7 +251,7 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return 0 + return Weight::zero() } let (weight_per_key, mut remaining_key_budget) = @@ -282,7 +285,10 @@ where } >::put(queue); - weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight)) + let ref_time_weight = weight_limit + .ref_time() + .saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as RefTimeWeight)); + Weight::from_ref_time(ref_time_weight) } /// Generates a unique trie id by returning `hash(account_id ++ nonce)`. diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index bbac18142a658..3571434bb823b 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -17,8 +17,8 @@ use crate::{ chain_extension::{ - ChainExtension, Environment, Ext, InitState, Result as ExtensionResult, RetVal, - ReturnFlags, SysConfig, UncheckedFrom, + ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, + Result as ExtensionResult, RetVal, ReturnFlags, SysConfig, UncheckedFrom, }, exec::{FixSizedKey, Frame}, storage::Storage, @@ -118,6 +118,17 @@ pub struct TestExtension { last_seen_inputs: (u32, u32, u32, u32), } +#[derive(Default)] +pub struct RevertingExtension; + +#[derive(Default)] +pub struct DisabledExtension; + +#[derive(Default)] +pub struct TempStorageExtension { + storage: u32, +} + impl TestExtension { fn disable() { TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) @@ -128,7 +139,7 @@ impl TestExtension { } fn last_seen_inputs() -> (u32, u32, u32, u32) { - TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs.clone()) + TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs) } } @@ -139,36 +150,38 @@ impl Default for TestExtension { } impl ChainExtension for TestExtension { - fn call(func_id: u32, env: Environment) -> ExtensionResult + fn call(&mut self, env: Environment) -> ExtensionResult where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { + let func_id = env.func_id(); + let id = env.ext_id() as u32 | func_id as u32; match func_id { - 0 => { + 0x8000 => { let mut env = env.buf_in_buf_out(); - let input = env.read(2)?; + let input = env.read(8)?; env.write(&input, false, None)?; TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, - 1 => { + 0x8001 => { let env = env.only_in(); TEST_EXTENSION.with(|e| { e.borrow_mut().last_seen_inputs = (env.val0(), env.val1(), env.val2(), env.val3()) }); - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, - 2 => { + 0x8002 => { let mut env = env.buf_in_buf_out(); - let weight = env.read(2)?[1].into(); + let weight = Weight::from_ref_time(env.read(5)?[4].into()); env.charge_weight(weight)?; - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, - 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), + 0x8003 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { - panic!("Passed unknown func_id to test chain extension: {}", func_id); + panic!("Passed unknown id to test chain extension: {}", func_id); }, } } @@ -178,6 +191,77 @@ impl ChainExtension for TestExtension { } } +impl RegisteredChainExtension 
for TestExtension { + const ID: u16 = 0; +} + +impl ChainExtension for RevertingExtension { + fn call(&mut self, _env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![0x4B, 0x1D] }) + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } +} + +impl RegisteredChainExtension for RevertingExtension { + const ID: u16 = 1; +} + +impl ChainExtension for DisabledExtension { + fn call(&mut self, _env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + panic!("Disabled chain extensions are never called") + } + + fn enabled() -> bool { + false + } +} + +impl RegisteredChainExtension for DisabledExtension { + const ID: u16 = 2; +} + +impl ChainExtension for TempStorageExtension { + fn call(&mut self, env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + let func_id = env.func_id(); + match func_id { + 0 => self.storage = 42, + 1 => assert_eq!(self.storage, 42, "Storage is preserved inside the same call."), + 2 => { + assert_eq!(self.storage, 0, "Storage is different for different calls."); + self.storage = 99; + }, + 3 => assert_eq!(self.storage, 99, "Storage is preserved inside the same call."), + _ => { + panic!("Passed unknown id to test chain extension: {}", func_id); + }, + } + Ok(RetVal::Converging(0)) + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } +} + +impl RegisteredChainExtension for TempStorageExtension { + const ID: u16 = 3; +} + parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); @@ -248,7 +332,7 @@ parameter_types! { impl Convert> for Test { fn convert(w: Weight) -> BalanceOf { - w + w.ref_time() } } @@ -271,6 +355,10 @@ impl Contains for TestFilter { } } +parameter_types! 
{ + pub const DeletionWeightLimit: Weight = Weight::from_ref_time(500_000_000_000); +} + impl Config for Test { type Time = Timestamp; type Randomness = Randomness; @@ -281,9 +369,10 @@ impl Config for Test { type CallStack = [Frame; 31]; type WeightPrice = Self; type WeightInfo = (); - type ChainExtension = TestExtension; + type ChainExtension = + (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DeletionQueueDepth = ConstU32<1024>; - type DeletionWeightLimit = ConstU64<500_000_000_000>; + type DeletionWeightLimit = DeletionWeightLimit; type Schedule = MySchedule; type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; @@ -299,7 +388,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = 100_000_000_000; +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000); pub struct ExtBuilder { existential_deposit: u64, @@ -352,6 +441,29 @@ fn initialize_block(number: u64) { System::initialize(&number, &[0u8; 32].into(), &Default::default()); } +struct ExtensionInput<'a> { + extension_id: u16, + func_id: u16, + extra: &'a [u8], +} + +impl<'a> ExtensionInput<'a> { + fn to_vec(&self) -> Vec { + ((self.extension_id as u32) << 16 | (self.func_id as u32)) + .to_le_bytes() + .iter() + .chain(self.extra) + .cloned() + .collect() + } +} + +impl<'a> From> for Vec { + fn from(input: ExtensionInput) -> Vec { + input.to_vec() + } +} + // Perform a call to a plain account. // The actual transfer fails because we can only call contracts. // Then we check that at least the base costs were charged (no runtime gas costs). @@ -534,7 +646,7 @@ fn run_out_of_gas() { Origin::signed(ALICE), addr, // newly created account 0, - 1_000_000_000_000, + Weight::from_ref_time(1_000_000_000_000), None, vec![], ), @@ -1538,39 +1650,145 @@ fn chain_extension_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - // The contract takes a up to 2 byte buffer where the first byte passed is used as - as func_id to the chain extension which behaves differently based on the - func_id.
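The `ExtensionInput` helper added above is the key to the reworked tests below: the first four input bytes are a little-endian `u32` that packs `extension_id` into the high 16 bits and `func_id` into the low 16 bits, and the high half is what routes a call across the tuple of registered extensions. A round-trip sketch (plain integer arithmetic mirroring `to_vec`; `unpack` is an illustrative stand-in for what `env.ext_id()` and `env.func_id()` expose on the runtime side):

fn pack(extension_id: u16, func_id: u16) -> [u8; 4] {
    (((extension_id as u32) << 16) | (func_id as u32)).to_le_bytes()
}

fn unpack(bytes: [u8; 4]) -> (u16, u16) {
    let id = u32::from_le_bytes(bytes);
    ((id >> 16) as u16, (id & 0xFFFF) as u16) // (extension_id, func_id)
}

// E.g. `pack(1, 0)` selects `RevertingExtension` (`ID = 1`), while
// `pack(0, 0x8000)` hits `TestExtension`'s input-echo branch.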
- - // 0 = read input buffer and pass it through as output + // 0x8000 = read input buffer and pass it through as output + let input: Vec = + ExtensionInput { extension_id: 0, func_id: 0x8000, extra: &[99] }.into(); let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![0, 99], false); - let gas_consumed = result.gas_consumed; - assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); - assert_eq!(result.result.unwrap().data, Bytes(vec![0, 99])); + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); + assert_eq!(TestExtension::last_seen_buffer(), input); + assert_eq!(result.result.unwrap().data, Bytes(input)); - // 1 = treat inputs as integer primitives and store the supplied integers - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![1], false) - .result - .unwrap(); + // 0x8001 = treat inputs as integer primitives and store the supplied integers + Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 0, func_id: 0x8001, extra: &[] }.into(), + false, + ) + .result + .unwrap(); // those values passed in the fixture - assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); + assert_eq!(TestExtension::last_seen_inputs(), (4, 4, 16, 12)); - // 2 = charge some extra weight (amount supplied in second byte) - let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![2, 42], false); + // 0x8002 = charge some extra weight (amount supplied in the fifth byte) + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[0] }.into(), + false, + ); + assert_ok!(result.result); + let gas_consumed = result.gas_consumed; + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[42] }.into(), + false, + ); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 42); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[95] }.into(), + false, + ); + assert_ok!(result.result); + assert_eq!(result.gas_consumed, gas_consumed + 95); - // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![3], false) - .result - .unwrap(); + // 0x8003 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 0, func_id: 0x8003, extra: &[] }.into(), + false, + ) + .result + .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, Bytes(vec![42, 99])); + + // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer + // We set the MSB part to 1 (instead of 0) which routes the request into the second + // extension + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), + false, + ) + .result + .unwrap(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); + + // Diverging to third chain extension that is disabled + // We set the MSB part to 2 (instead of 0) which routes the request into the third extension + 
assert_err_ignore_postinfo!( + Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into(), + ), + Error::::NoChainExtension, + ); }); } +#[test] +fn chain_extension_temp_storage_works() { + let (code, hash) = compile_module::("chain_extension_temp_storage").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + min_balance * 100, + GAS_LIMIT, + None, + code, + vec![], + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + + // Call func 0 and func 1 back to back. + let stop_recursion = 0u8; + let mut input: Vec = ExtensionInput { extension_id: 3, func_id: 0, extra: &[] }.into(); + input.extend_from_slice( + ExtensionInput { extension_id: 3, func_id: 1, extra: &[stop_recursion] } + .to_vec() + .as_ref(), + ); + + assert_ok!( + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false) + .result + ); + }) +} + #[test] fn lazy_removal_works() { let (code, hash) = compile_module::("self_destruct").unwrap(); @@ -1612,7 +1830,7 @@ fn lazy_removal_works() { assert_matches!(child::get(trie, &[99]), Some(42)); // Run the lazy removal - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // Value should be gone now assert_matches!(child::get::(trie, &[99]), None); @@ -1682,7 +1900,7 @@ fn lazy_batch_removal_works() { } // Run single lazy removal - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // The single lazy removal should have removed all queued tries for trie in tries.iter() { @@ -1697,7 +1915,7 @@ fn lazy_removal_partial_remove_works() { // We create a contract with some extra keys above the weight limit let extra_keys = 7u32; - let weight_limit = 5_000_000_000; + let weight_limit = Weight::from_ref_time(5_000_000_000); let (_, max_keys) = Storage::::deletion_budget(1, weight_limit); let vals: Vec<_> = (0..max_keys + extra_keys) .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) @@ -1871,7 +2089,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { assert_matches!(child::get::(trie, &[99]), Some(42)); // Run on_idle with max remaining weight, this should remove the value - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // Value should be gone assert_matches!(child::get::(trie, &[99]), None); @@ -1882,7 +2100,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { fn lazy_removal_does_not_use_all_weight() { let (code, hash) = compile_module::("self_destruct").unwrap(); - let weight_limit = 5_000_000_000; + let weight_limit = Weight::from_ref_time(5_000_000_000); let mut ext = ExtBuilder::default().existential_deposit(50).build(); let (trie, vals, weight_per_key) = ext.execute_with(|| { @@ -1953,7 +2171,7 @@ fn lazy_removal_does_not_use_all_weight() { let weight_used = Storage::::process_deletion_queue_batch(weight_limit); // We have one less key in our trie than our weight limit suffices for - assert_eq!(weight_used, weight_limit - weight_per_key); + assert_eq!(weight_used, weight_limit - Weight::from_ref_time(weight_per_key)); // All the keys are removed for val in vals { @@ -2108,7 
+2326,7 @@ fn reinstrument_does_charge() { assert!(result2.gas_consumed > result1.gas_consumed); assert_eq!( result2.gas_consumed, - result1.gas_consumed + ::WeightInfo::reinstrument(code_len), + result1.gas_consumed + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } @@ -2216,7 +2434,7 @@ fn gas_estimation_nested_call_fixed_limit() { let input: Vec = AsRef::<[u8]>::as_ref(&addr_callee) .iter() .cloned() - .chain((GAS_LIMIT / 5).to_le_bytes()) + .chain((GAS_LIMIT / 5).ref_time().to_le_bytes()) .collect(); // Call in order to determine the gas that is required for this call @@ -2240,7 +2458,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - result.gas_required, + Weight::from_ref_time(result.gas_required), Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2310,7 +2528,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - result.gas_required, + Weight::from_ref_time(result.gas_required), None, call.encode(), false, @@ -3264,8 +3482,8 @@ fn set_code_hash() { phase: Phase::Initialization, event: Event::Contracts(crate::Event::ContractCodeUpdated { contract: contract_addr.clone(), - new_code_hash: new_code_hash.clone(), - old_code_hash: code_hash.clone(), + new_code_hash, + old_code_hash: code_hash, }), topics: vec![], }, diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 10de436bfb155..659caafa7f0b0 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -215,17 +215,19 @@ impl Token for CodeToken { use self::CodeToken::*; // In case of `Load` we already covered the general costs of // calling the storage but still need to account for the actual size of the - // contract code. This is why we substract `T::*::(0)`. We need to do this at this + // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. - match *self { + let ref_time_weight = match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), Load(len) => { let computation = T::WeightInfo::call_with_code_per_byte(len) .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwith = T::ContractAccessWeight::get().saturating_mul(len.into()); - computation.max(bandwith) + let bandwidth = T::ContractAccessWeight::get().scalar_saturating_mul(len as u64); + computation.max(bandwidth) }, - } + }; + + ref_time_weight } } diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs deleted file mode 100644 index aa5a1626681f4..0000000000000 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ /dev/null @@ -1,396 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Definition of macros that hides boilerplate of defining external environment -//! 
for a wasm module. -//! -//! Most likely you should use `define_env` macro. - -macro_rules! convert_args { - () => (vec![]); - ( $( $t:ty ),* ) => ( vec![ $( { use $crate::wasm::env_def::ConvertibleToWasm; <$t>::VALUE_TYPE }, )* ] ); -} - -macro_rules! gen_signature { - ( ( $( $params: ty ),* ) ) => ( - { - wasm_instrument::parity_wasm::elements::FunctionType::new( - convert_args!($($params),*), vec![], - ) - } - ); - - ( ( $( $params: ty ),* ) -> $returns: ty ) => ( - { - wasm_instrument::parity_wasm::elements::FunctionType::new( - convert_args!($($params),*), - vec![{use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE}], - ) - } - ); -} - -macro_rules! gen_signature_dispatch { - ( - $needle_module:ident, - $needle_name:ident, - $needle_sig:ident ; - $module:ident, - $name:ident - ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* - ) => { - let module = stringify!($module).as_bytes(); - if module == $needle_module && stringify!($name).as_bytes() == $needle_name { - let signature = gen_signature!( ( $( $params ),* ) $( -> $returns )* ); - if $needle_sig == &signature { - return true; - } - } else { - gen_signature_dispatch!($needle_module, $needle_name, $needle_sig ; $($rest)*); - } - }; - ( $needle_module:ident, $needle_name:ident, $needle_sig:ident ; ) => {}; -} - -/// Unmarshall arguments and then execute `body` expression and return its result. -macro_rules! unmarshall_then_body { - ( $body:tt, $ctx:ident, $args_iter:ident, $( $names:ident : $params:ty ),* ) => ({ - $( - let $names : <$params as $crate::wasm::env_def::ConvertibleToWasm>::NativeType = - $args_iter.next() - .and_then(|v| <$params as $crate::wasm::env_def::ConvertibleToWasm> - ::from_typed_value(v.clone())) - .expect( - "precondition: all imports should be checked against the signatures of corresponding - functions defined by `define_env!` macro by the user of the macro; - signatures of these functions defined by `$params`; - calls always made with arguments types of which are defined by the corresponding imports; - thus types of arguments should be equal to type list in `$params` and - length of argument list and $params should be equal; - thus this can never be `None`; - qed; - " - ); - )* - $body - }) -} - -/// Since we can't specify the type of closure directly at binding site: -/// -/// ```nocompile -/// let f: FnOnce() -> Result<::NativeType, _> = || { /* ... */ }; -/// ``` -/// -/// we use this function to constrain the type of the closure. -#[inline(always)] -pub fn constrain_closure(f: F) -> F -where - F: FnOnce() -> Result, -{ - f -} - -macro_rules! 
unmarshall_then_body_then_marshall { - ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ - let body = $crate::wasm::env_def::macros::constrain_closure::< - <$returns as $crate::wasm::env_def::ConvertibleToWasm>::NativeType, _ - >(|| { - unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) - }); - let r = body().map_err(|reason| { - $ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Value({ use $crate::wasm::env_def::ConvertibleToWasm; r.to_typed_value() })) - }); - ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ - let body = $crate::wasm::env_def::macros::constrain_closure::<(), _>(|| { - unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) - }); - body().map_err(|reason| { - $ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Unit) - }) -} - -macro_rules! define_func { - ( $trait:tt $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { - fn $name< E: $trait >( - $ctx: &mut $crate::wasm::Runtime, - args: &[sp_sandbox::Value], - ) -> Result - where - ::AccountId: - sp_core::crypto::UncheckedFrom<::Hash> + - AsRef<[u8]> - { - #[allow(unused)] - let mut args = args.iter(); - - unmarshall_then_body_then_marshall!( - args, - $ctx, - ( $( $names : $params ),* ) $( -> $returns )* => $body - ) - } - }; -} - -macro_rules! register_body { - ( $reg_cb:ident, $trait:tt; - $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt - ) => { - $reg_cb( - stringify!($module).as_bytes(), - stringify!($name).as_bytes(), - { - define_func!( - $trait $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body - ); - $name:: - } - ); - } -} - -macro_rules! register_func { - ( $reg_cb:ident, $trait:tt; ) => {}; - - ( $reg_cb:ident, $trait:tt; - __unstable__ $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt $($rest:tt)* - ) => { - #[cfg(feature = "unstable-interface")] - register_body!( - $reg_cb, $trait; - __unstable__ $name - ( $ctx $( , $names : $params )* ) - $( -> $returns )* => $body - ); - register_func!( $reg_cb, $trait; $($rest)* ); - }; - - ( $reg_cb:ident, $trait:tt; - $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt $($rest:tt)* - ) => { - register_body!( - $reg_cb, $trait; - $module $name - ( $ctx $( , $names : $params )* ) - $( -> $returns )* => $body - ); - register_func!( $reg_cb, $trait; $($rest)* ); - }; -} - -/// Define a function set that can be imported by executing wasm code. -/// -/// **NB**: Be advised that all functions defined by this macro -/// will panic if called with unexpected arguments. -/// -/// It's up to the user of this macro to check signatures of wasm code to be executed -/// and reject the code if any imported function has a mismatched signature. -macro_rules! 
define_env { - ( $init_name:ident , < E: $trait:tt > , - $( [$module:ident] $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt , )* - ) => { - pub struct $init_name; - - impl $crate::wasm::env_def::ImportSatisfyCheck for $init_name { - fn can_satisfy( - module: &[u8], - name: &[u8], - func_type: &wasm_instrument::parity_wasm::elements::FunctionType, - ) -> bool - { - #[cfg(not(feature = "unstable-interface"))] - if module == b"__unstable__" { - return false; - } - gen_signature_dispatch!( - module, name, func_type ; - $( $module, $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* - ); - - return false; - } - } - - impl $crate::wasm::env_def::FunctionImplProvider for $init_name - where - ::AccountId: - sp_core::crypto::UncheckedFrom<::Hash> + - AsRef<[u8]> - { - fn impls)>(f: &mut F) { - register_func!( - f, - $trait; - $( $module $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* - ); - } - } - }; -} - -#[cfg(test)] -mod tests { - use crate::{ - exec::Ext, - wasm::{runtime::TrapReason, tests::MockExt, Runtime}, - Weight, - }; - use sp_runtime::traits::Zero; - use sp_sandbox::{ReturnValue, Value}; - use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; - - struct TestRuntime { - value: u32, - } - - impl TestRuntime { - fn set_trap_reason(&mut self, _reason: TrapReason) {} - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_value_or_trap() { - fn test_value( - _ctx: &mut TestRuntime, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - _ctx, - (a: u32, b: u32) -> u32 => { - if b == 0 { - Err(crate::wasm::runtime::TrapReason::Termination) - } else { - Ok(a / b) - } - } - ) - } - - let ctx = &mut TestRuntime { value: 0 }; - assert_eq!( - test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), - ReturnValue::Value(Value::I32(5)), - ); - assert!(test_value(ctx, &[Value::I32(15), Value::I32(0)]).is_err()); - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_unit() { - fn test_unit( - ctx: &mut TestRuntime, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - ctx, - (a: u32, b: u32) => { - ctx.value = a + b; - Ok(()) - } - ) - } - - let ctx = &mut TestRuntime { value: 0 }; - let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); - assert_eq!(result, ReturnValue::Unit); - assert_eq!(ctx.value, 5); - } - - #[test] - fn macro_define_func() { - define_func!( Ext seal_gas (_ctx, amount: u32) => { - let amount = Weight::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(TrapReason::Termination) - } - }); - let _f: fn( - &mut Runtime, - &[sp_sandbox::Value], - ) -> Result = seal_gas::; - } - - #[test] - fn macro_gen_signature() { - assert_eq!(gen_signature!((i32)), FunctionType::new(vec![ValueType::I32], vec![])); - - assert_eq!( - gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], vec![ValueType::I32]), - ); - } - - #[test] - fn macro_unmarshall_then_body() { - let args = vec![Value::I32(5), Value::I32(3)]; - let mut args = args.iter(); - - let ctx: &mut u32 = &mut 0; - - let r = unmarshall_then_body!( - { - *ctx = a + b; - a * b - }, - ctx, - args, - a: u32, - b: u32 - ); - - assert_eq!(*ctx, 8); - assert_eq!(r, 15); - } - - #[test] - fn macro_define_env() { - use crate::wasm::env_def::ImportSatisfyCheck; - - define_env!(Env, , - [seal0] seal_gas( _ctx, amount: u32 ) => { - 
let amount = Weight::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(crate::wasm::runtime::TrapReason::Termination) - } - }, - ); - - assert!(Env::can_satisfy( - b"seal0", - b"seal_gas", - &FunctionType::new(vec![ValueType::I32], vec![]) - )); - assert!(!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![]))); - } -} diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index b4c5ffe81e7c1..be6e688c97868 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -21,9 +21,6 @@ use crate::exec::Ext; use sp_sandbox::Value; use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; -#[macro_use] -pub mod macros; - pub trait ConvertibleToWasm: Sized { const VALUE_TYPE: ValueType; type NativeType; @@ -31,8 +28,8 @@ pub trait ConvertibleToWasm: Sized { fn from_typed_value(_: Value) -> Option; } impl ConvertibleToWasm for i32 { - type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = i32; fn to_typed_value(self) -> Value { Value::I32(self) } @@ -41,8 +38,8 @@ impl ConvertibleToWasm for i32 { } } impl ConvertibleToWasm for u32 { - type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = u32; fn to_typed_value(self) -> Value { Value::I32(self as i32) } @@ -54,8 +51,8 @@ impl ConvertibleToWasm for u32 { } } impl ConvertibleToWasm for u64 { - type NativeType = u64; const VALUE_TYPE: ValueType = ValueType::I64; + type NativeType = u64; fn to_typed_value(self) -> Value { Value::I64(self as i64) } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 3dd5da187b258..f989c21b00ffc 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -365,7 +365,7 @@ mod tests { events: Default::default(), runtime_calls: Default::default(), schedule: Default::default(), - gas_meter: GasMeter::new(10_000_000_000), + gas_meter: GasMeter::new(Weight::from_ref_time(10_000_000_000)), debug_buffer: Default::default(), ecdsa_recover: Default::default(), } @@ -403,10 +403,10 @@ mod tests { salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { - code_hash: code_hash.clone(), + code_hash, value, data: data.to_vec(), - gas_left: gas_limit, + gas_left: gas_limit.ref_time(), salt: salt.to_vec(), }); Ok(( @@ -520,7 +520,7 @@ mod tests { 16_384 } fn get_weight_price(&self, weight: Weight) -> BalanceOf { - BalanceOf::::from(1312_u32).saturating_mul(weight.into()) + BalanceOf::::from(1312_u32).saturating_mul(weight.ref_time().into()) } fn schedule(&self) -> &Schedule { &self.schedule @@ -541,7 +541,7 @@ mod tests { signature: &[u8; 65], message_hash: &[u8; 32], ) -> Result<[u8; 33], ()> { - self.ecdsa_recover.borrow_mut().push((signature.clone(), message_hash.clone())); + self.ecdsa_recover.borrow_mut().push((*signature, *message_hash)); Ok([3; 33]) } fn contract_info(&mut self) -> &mut crate::ContractInfo { @@ -1911,7 +1911,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left() > 0); + assert!(mock_ext.gas_meter.gas_left() > Weight::zero()); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index f6fff20de6b1a..7b81c1c55b3bd 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -517,6 +517,7 @@ mod tests { schedule::Limits, tests::{Test, ALICE}, }; + use 
pallet_contracts_proc_macro::define_env; use std::fmt; impl fmt::Debug for PrefabWasmModule { @@ -532,17 +533,27 @@ mod tests { // Define test environment for tests. We need the ImportSatisfyCheck // implementation from it, so the actual implementations don't matter. - define_env!(Test, , - [seal0] panic(_ctx) => { unreachable!(); }, + #[define_env] + pub mod test_env { + fn panic(_ctx: crate::wasm::Runtime) -> Result<(), TrapReason> { + Ok(()) + } // gas is an implementation defined function and a contract can't import it. - [seal0] gas(_ctx, _amount: u32) => { unreachable!(); }, + fn gas(_ctx: crate::wasm::Runtime, _amount: u32) -> Result<(), TrapReason> { + Ok(()) + } - [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, + fn nop(_ctx: crate::wasm::Runtime, _unused: u64) -> Result<(), TrapReason> { + Ok(()) + } // new version of nop with other data type for argument - [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, - ); + #[version(1)] + fn nop(_ctx: crate::wasm::Runtime, _unused: i32) -> Result<(), TrapReason> { + Ok(()) + } + } } macro_rules! prepare_test { @@ -561,7 +572,7 @@ mod tests { }, .. Default::default() }; - let r = do_preparation::(wasm, &schedule, ALICE); + let r = do_preparation::(wasm, &schedule, ALICE); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); } }; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index c1757ba06412a..296f322f494d0 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,9 +26,10 @@ use crate::{ }; use bitflags::bitflags; -use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; +use pallet_contracts_proc_macro::define_env; use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; @@ -36,6 +37,9 @@ use sp_sandbox::SandboxMemory; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::ValueType; +/// The maximum nesting depth a contract can use when encoding types. +const MAX_DECODE_NESTING: u32 = 256; + /// Type of a storage key. #[allow(dead_code)] enum KeyType { @@ -65,7 +69,7 @@ impl KeyType { /// This enum can be extended in the future: New codes can be added but existing codes /// will not be changed or removed. This means that any contract **must not** exhaustively /// match return codes. Instead, contracts should prepare for unknown variants and deal with -/// those errors gracefuly in order to be forward compatible. +/// those errors gracefully in order to be forward compatible. #[repr(u32)] pub enum ReturnCode { /// API call successful.
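For contrast with the `env_def/macros.rs` machinery deleted above, this is the shape the new `#[define_env]` attribute expects. A hedged sketch with an illustrative host function (`seal_example` is made up; `Runtime`, `RuntimeCosts` and `TrapReason` are the surrounding code's types, with the generics that the diff rendering drops written out):

#[define_env]
pub mod sketch_env {
    // A plain `fn` replaces the old `[seal0] name(ctx, ...) => { ... }` arm.
    fn seal_example(ctx: Runtime<E: Ext>, amount: u32) -> Result<(), TrapReason> {
        ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?;
        Ok(())
    }

    // `#[version(1)]` replaces the old `[seal1]` marker, and `#[unstable]`
    // replaces `[__unstable__]`.
    #[version(1)]
    fn seal_example(ctx: Runtime<E: Ext>, amount: u64) -> Result<(), TrapReason> {
        ctx.charge_gas(RuntimeCosts::MeteringBlock(amount as u32))?;
        Ok(())
    }
}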
@@ -101,8 +105,9 @@ pub enum ReturnCode { } impl ConvertibleToWasm for ReturnCode { - type NativeType = Self; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = Self; + fn to_typed_value(self) -> sp_sandbox::Value { sp_sandbox::Value::I32(self as i32) } @@ -322,14 +327,14 @@ impl RuntimeCosts { EcdsaRecovery => s.ecdsa_recover, ChainExtension(amount) => amount, #[cfg(feature = "unstable-interface")] - CallRuntime(weight) => weight, + CallRuntime(weight) => weight.ref_time(), SetCodeHash => s.set_code_hash, EcdsaToEthAddress => s.ecdsa_to_eth_address, }; RuntimeToken { #[cfg(test)] _created_from: *self, - weight, + weight: Weight::from_ref_time(weight), } } } @@ -439,6 +444,7 @@ pub struct Runtime<'a, E: Ext + 'a> { input_data: Option>, memory: sp_sandbox::default_executor::Memory, trap_reason: Option, + chain_extension: Option::ChainExtension>>, } impl<'a, E> Runtime<'a, E> @@ -452,7 +458,13 @@ where input_data: Vec, memory: sp_sandbox::default_executor::Memory, ) -> Self { - Runtime { ext, input_data: Some(input_data), memory, trap_reason: None } + Runtime { + ext, + input_data: Some(input_data), + memory, + trap_reason: None, + chain_extension: Some(Box::new(Default::default())), + } } /// Converts the sandbox result and the runtime state into the execution outcome. @@ -567,7 +579,7 @@ where ptr: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -589,7 +601,7 @@ where len: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, len)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -845,7 +857,7 @@ where self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } self.ext.call( - gas, + Weight::from_ref_time(gas), callee, value, input_data, @@ -894,6 +906,7 @@ where salt_ptr: u32, salt_len: u32, ) -> Result { + let gas = Weight::from_ref_time(gas); self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; if value > 0u32.into() { @@ -936,183 +949,235 @@ where // Any input that leads to a out of bound error (reading or writing) or failing to decode // data passed to the supervisor will lead to a trap. This is not documented explicitly // for every function. -define_env!(Env, , - // Account for used gas. Traps if gas used is greater than gas limit. - // - // NOTE: This is a implementation defined call and is NOT a part of the public API. - // This call is supposed to be called only by instrumentation injected code. - // - // - amount: How much gas is used. - [seal0] gas(ctx, amount: u32) => { +#[define_env] +pub mod env { + /// Account for used gas. Traps if gas used is greater than gas limit. + /// + /// NOTE: This is a implementation defined call and is NOT a part of the public API. + /// This call is supposed to be called only by instrumentation injected code. + /// + /// - amount: How much gas is used. + fn gas(ctx: Runtime, amount: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) - }, + } - // Set the value at the given key in the contract storage. 
- // - // Equivalent to the newer version of `seal_set_storage` with the exception of the return - // type. Still a valid thing to call when not interested in the return value. - [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { + /// Set the value at the given key in the contract storage. + /// + /// Equivalent to the newer version of `seal_set_storage` with the exception of the return + /// type. Still a valid thing to call when not interested in the return value. + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + value_ptr: u32, + value_len: u32, + ) -> Result<(), TrapReason> { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) - }, + } - // Set the value at the given key in the contract storage. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // The value length must not exceed the maximum defined by the contracts module parameters. - // Specifying a `value_len` of zero will store an empty value. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - // - `value_ptr`: pointer into the linear memory where the value to set is placed. - // - `value_len`: the length of the value in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [seal1] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) -> u32 => { + /// Set the value at the given key in the contract storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// The value length must not exceed the maximum defined by the contracts module parameters. + /// Specifying a `value_len` of zero will store an empty value. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + /// - `value_ptr`: pointer into the linear memory where the value to set is placed. + /// - `value_len`: the length of the value in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[version(1)] + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len) - }, + } - // Set the value at the given key in the contract storage. - // - // The key and value lengths must not exceed the maximums defined by the contracts module parameters. - // Specifying a `value_len` of zero will store an empty value. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - // - `key_len`: the length of the key in bytes. - // - `value_ptr`: pointer into the linear memory where the value to set is placed. - // - `value_len`: the length of the value in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_set_storage(ctx, key_ptr: u32, key_len: u32, value_ptr: u32, value_len: u32) -> u32 => { + /// Set the value at the given key in the contract storage. 
+ /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. Specifying a `value_len` of zero will store an empty value. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `value_ptr`: pointer into the linear memory where the value to set is placed. + /// - `value_len`: the length of the value in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { ctx.set_storage(KeyType::Variable(key_len), key_ptr, value_ptr, value_len) - }, + } - // Clear the value at the given key in the contract storage. - // - // Equivalent to the newer version of `seal_clear_storage` with the exception of the return - // type. Still a valid thing to call when not interested in the return value. - [seal0] seal_clear_storage(ctx, key_ptr: u32) => { + /// Clear the value at the given key in the contract storage. + /// + /// Equivalent to the newer version of `seal_clear_storage` with the exception of the return + /// type. Still a valid thing to call when not interested in the return value. + fn seal_clear_storage(ctx: Runtime, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(KeyType::Fix, key_ptr).map(|_| ()) - }, + } - // Clear the value at the given key in the contract storage. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key is placed. - // - `key_len`: the length of the key in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_clear_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { + /// Clear the value at the given key in the contract storage. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key is placed. + /// - `key_len`: the length of the key in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_clear_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + ) -> Result { ctx.clear_storage(KeyType::Variable(key_len), key_ptr) - }, + } - // Retrieve the value under the given key from storage. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. 
+ /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + fn seal_get_storage( + ctx: Runtime, + key_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { ctx.get_storage(KeyType::Fix, key_ptr, out_ptr, out_len_ptr) - }, + } - // Retrieve the value under the given key from storage. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // The key length must not exceed the maximum defined by the contracts module parameter. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [__unstable__] seal_get_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + #[unstable] + fn seal_get_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { ctx.get_storage(KeyType::Variable(key_len), key_ptr, out_ptr, out_len_ptr) - }, + } - // Checks whether there is a value stored under the given key. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [seal0] seal_contains_storage(ctx, key_ptr: u32) -> u32 => { + /// Checks whether there is a value stored under the given key. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. 
Otherwise + /// `SENTINEL` is returned as a sentinel value. + fn seal_contains_storage(ctx: Runtime, key_ptr: u32) -> Result { ctx.contains_storage(KeyType::Fix, key_ptr) - }, + } - // Checks whether there is a value stored under the given key. - // - // The key length must not exceed the maximum defined by the contracts module parameter. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_contains_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { + /// Checks whether there is a value stored under the given key. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_contains_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + ) -> Result { ctx.contains_storage(KeyType::Variable(key_len), key_ptr) - }, + } - // Retrieve and remove the value under the given key from storage. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [__unstable__] seal_take_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve and remove the value under the given key from storage. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + #[unstable] + fn seal_take_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; let key = ctx.read_sandbox_memory(key_ptr, key_len)?; - if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(&VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, None, true)? { + if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, + None, + true, + )? 
-	// Retrieve and remove the value under the given key from storage.
-	//
-	// # Parameters
-	//
-	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
-	// - `key_len`: the length of the key in bytes.
-	// - `out_ptr`: pointer to the linear memory where the value is written to.
-	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
-	//   is read from and the value length is written to.
-	//
-	// # Errors
-	//
-	// `ReturnCode::KeyNotFound`
-	[__unstable__] seal_take_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
+	/// Retrieve and remove the value under the given key from storage.
+	///
+	/// # Parameters
+	///
+	/// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
+	/// - `key_len`: the length of the key in bytes.
+	/// - `out_ptr`: pointer to the linear memory where the value is written to.
+	/// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and
+	///   the value length is written to.
+	///
+	/// # Errors
+	///
+	/// `ReturnCode::KeyNotFound`
+	#[unstable]
+	fn seal_take_storage(
+		ctx: Runtime<E: Ext>,
+		key_ptr: u32,
+		key_len: u32,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?;
 		let key = ctx.read_sandbox_memory(key_ptr, key_len)?;
-		if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?, None, true)? {
+		if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(
+			&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+			None,
+			true,
+		)? {
 			ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32));
 			ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?;
 			Ok(ReturnCode::Success)
@@ -1120,59 +1185,56 @@ define_env!(Env, <E: Ext>,
 			ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(0));
 			Ok(ReturnCode::KeyNotFound)
 		}
-	},
-
-	// Transfer some value to another account.
-	//
-	// # Parameters
-	//
-	// - account_ptr: a pointer to the address of the beneficiary account
-	//   Should be decodable as an `T::AccountId`. Traps otherwise.
-	// - account_len: length of the address buffer.
-	// - value_ptr: a pointer to the buffer with value, how much value to send.
-	//   Should be decodable as a `T::Balance`. Traps otherwise.
-	// - value_len: length of the value buffer.
-	//
-	// # Errors
-	//
-	// `ReturnCode::TransferFailed`
-	[seal0] seal_transfer(
-		ctx,
+	}
+	/// Transfer some value to another account.
+	///
+	/// # Parameters
+	///
+	/// - account_ptr: a pointer to the address of the beneficiary account. Should be decodable as
+	///   a `T::AccountId`. Traps otherwise.
+	/// - account_len: length of the address buffer.
+	/// - value_ptr: a pointer to the buffer with value, how much value to send. Should be
+	///   decodable as a `T::Balance`. Traps otherwise.
+	/// - value_len: length of the value buffer.
+	///
+	/// # Errors
+	///
+	/// `ReturnCode::TransferFailed`
+	fn seal_transfer(
+		ctx: Runtime<E: Ext>,
 		account_ptr: u32,
 		_account_len: u32,
 		value_ptr: u32,
-		_value_len: u32
-	) -> ReturnCode => {
+		_value_len: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Transfer)?;
 		let callee: <<E as Ext>::T as frame_system::Config>::AccountId =
 			ctx.read_sandbox_memory_as(account_ptr)?;
-		let value: BalanceOf<<E as Ext>::T> =
-			ctx.read_sandbox_memory_as(value_ptr)?;
-
+		let value: BalanceOf<<E as Ext>::T> = ctx.read_sandbox_memory_as(value_ptr)?;
 		let result = ctx.ext.transfer(&callee, value);
 		match result {
 			Ok(()) => Ok(ReturnCode::Success),
 			Err(err) => {
				let code = Runtime::<E>::err_into_return_code(err)?;
 				Ok(code)
-			}
+			},
 		}
-	},
+	}

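From the wasm side, `seal_transfer` reports failure purely through its return value. A hedged sketch of a contract-side wrapper; the 32/16-byte sizes assume the common `AccountId32`/`u128` balance layout and are not mandated by the ABI:

```rust
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_transfer(account_ptr: u32, account_len: u32, value_ptr: u32, value_len: u32) -> u32;
}

/// Transfers the SCALE encoded `value` to `dest`, surfacing the raw `ReturnCode` on failure.
unsafe fn transfer(dest: &[u8; 32], value: &[u8; 16]) -> Result<(), u32> {
	match seal_transfer(dest.as_ptr() as u32, 32, value.as_ptr() as u32, 16) {
		0 => Ok(()), // ReturnCode::Success
		n => Err(n), // e.g. `ReturnCode::TransferFailed`; see the pallet's `ReturnCode` enum
	}
}
```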
-	// Make a call to another contract.
-	//
-	// # Deprecation
-	//
-	// This is equivalent to calling the newer version of this function with
-	// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation.
-	//
-	// # Note
-	//
-	// The values `_callee_len` and `_value_len` are ignored because the encoded sizes
-	// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards
-	// compatibility. Consider switching to the newest version of this function.
-	[seal0] seal_call(
-		ctx,
+	/// Make a call to another contract.
+	///
+	/// # Deprecation
+	///
+	/// This is equivalent to calling the newer version of this function with
+	/// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation.
+	///
+	/// # Note
+	///
+	/// The values `_callee_len` and `_value_len` are ignored because the encoded sizes
+	/// of those types are fixed through [`MaxEncodedLen`]. The fields exist for backwards
+	/// compatibility. Consider switching to the newest version of this function.
+	fn seal_call(
+		ctx: Runtime<E: Ext>,
 		callee_ptr: u32,
 		_callee_len: u32,
 		gas: u64,
@@ -1181,49 +1243,50 @@ define_env!(Env, <E: Ext>,
 		input_data_ptr: u32,
 		input_data_len: u32,
 		output_ptr: u32,
-		output_len_ptr: u32
-	) -> ReturnCode => {
+		output_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.call(
 			CallFlags::ALLOW_REENTRY,
-			CallType::Call{callee_ptr, value_ptr, gas},
+			CallType::Call { callee_ptr, value_ptr, gas },
 			input_data_ptr,
 			input_data_len,
 			output_ptr,
 			output_len_ptr,
 		)
-	},
+	}

-	// Make a call to another contract.
-	//
-	// The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`.
-	// The copy of the output buffer can be skipped by supplying the sentinel value
-	// of `SENTINEL` to `output_ptr`.
-	//
-	// # Parameters
-	//
-	// - flags: See [`CallFlags`] for a documenation of the supported flags.
-	// - callee_ptr: a pointer to the address of the callee contract.
-	//   Should be decodable as an `T::AccountId`. Traps otherwise.
-	// - gas: how much gas to devote to the execution.
-	// - value_ptr: a pointer to the buffer with value, how much value to send.
-	//   Should be decodable as a `T::Balance`. Traps otherwise.
-	// - input_data_ptr: a pointer to a buffer to be used as input data to the callee.
-	// - input_data_len: length of the input data buffer.
-	// - output_ptr: a pointer where the output buffer is copied to.
-	// - output_len_ptr: in-out pointer to where the length of the buffer is read from
-	//   and the actual length is written to.
-	//
-	// # Errors
-	//
-	// An error means that the call wasn't successful output buffer is returned unless
-	// stated otherwise.
-	//
-	// `ReturnCode::CalleeReverted`: Output buffer is returned.
-	// `ReturnCode::CalleeTrapped`
-	// `ReturnCode::TransferFailed`
-	// `ReturnCode::NotCallable`
-	[seal1] seal_call(
-		ctx,
+	/// Make a call to another contract.
+	///
+	/// The callee's output buffer is copied to `output_ptr` and its length to `output_len_ptr`.
+	/// The copy of the output buffer can be skipped by supplying the sentinel value
+	/// of `SENTINEL` to `output_ptr`.
+	///
+	/// # Parameters
+	///
+	/// - flags: See [`CallFlags`] for documentation of the supported flags.
+	/// - callee_ptr: a pointer to the address of the callee contract. Should be decodable as a
+	///   `T::AccountId`. Traps otherwise.
+	/// - gas: how much gas to devote to the execution.
+	/// - value_ptr: a pointer to the buffer with value, how much value to send. Should be
+	///   decodable as a `T::Balance`. Traps otherwise.
+	/// - input_data_ptr: a pointer to a buffer to be used as input data to the callee.
+	/// - input_data_len: length of the input data buffer.
+	/// - output_ptr: a pointer where the output buffer is copied to.
+	/// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the
+	///   actual length is written to.
+	///
+	/// # Errors
+	///
+	/// An error means that the call wasn't successful and no output buffer is returned unless
+	/// stated otherwise.
+	///
+	/// `ReturnCode::CalleeReverted`: Output buffer is returned.
+	/// `ReturnCode::CalleeTrapped`
+	/// `ReturnCode::TransferFailed`
+	/// `ReturnCode::NotCallable`
+	#[version(1)]
+	fn seal_call(
+		ctx: Runtime<E: Ext>,
 		flags: u32,
 		callee_ptr: u32,
 		gas: u64,
@@ -1231,75 +1294,74 @@ define_env!(Env, <E: Ext>,
 		input_data_ptr: u32,
 		input_data_len: u32,
 		output_ptr: u32,
-		output_len_ptr: u32
-	) -> ReturnCode => {
+		output_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.call(
 			CallFlags::from_bits(flags).ok_or(Error::<E::T>::InvalidCallFlags)?,
-			CallType::Call{callee_ptr, value_ptr, gas},
+			CallType::Call { callee_ptr, value_ptr, gas },
 			input_data_ptr,
 			input_data_len,
 			output_ptr,
 			output_len_ptr,
 		)
-	},
+	}

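For orientation, here is a hedged contract-side sketch of the `seal1` variant above. The import module name `seal1` mirrors the `#[version(1)]` attribute; the `ALLOW_REENTRY` bit value is an assumption taken from the pallet's `CallFlags` bitflags and the helper itself is hypothetical:

```rust
#[link(wasm_import_module = "seal1")]
extern "C" {
	fn seal_call(
		flags: u32, callee_ptr: u32, gas: u64, value_ptr: u32,
		input_data_ptr: u32, input_data_len: u32,
		output_ptr: u32, output_len_ptr: u32,
	) -> u32;
}

const ALLOW_REENTRY: u32 = 0b1000; // assumed bit position in `CallFlags`
const SENTINEL: u32 = u32::MAX;

/// Calls `callee`, skipping the output copy by passing SENTINEL as documented above.
unsafe fn call_ignoring_output(callee: &[u8; 32], gas: u64, value: &[u8; 16], input: &[u8]) -> u32 {
	seal_call(
		ALLOW_REENTRY,
		callee.as_ptr() as u32,
		gas,
		value.as_ptr() as u32,
		input.as_ptr() as u32,
		input.len() as u32,
		SENTINEL, // no output buffer wanted
		SENTINEL,
	)
}
```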
-	// Execute code in the context (storage, caller, value) of the current contract.
-	//
-	// Reentrancy protection is always disabled since the callee is allowed
-	// to modify the callers storage. This makes going through a reentrancy attack
-	// unnecessary for the callee when it wants to exploit the caller.
-	//
-	// # Parameters
-	//
-	// - flags: See [`CallFlags`] for a documentation of the supported flags.
-	// - code_hash: a pointer to the hash of the code to be called.
-	// - input_data_ptr: a pointer to a buffer to be used as input data to the callee.
-	// - input_data_len: length of the input data buffer.
-	// - output_ptr: a pointer where the output buffer is copied to.
-	// - output_len_ptr: in-out pointer to where the length of the buffer is read from
-	//   and the actual length is written to.
-	//
-	// # Errors
-	//
-	// An error means that the call wasn't successful and no output buffer is returned unless
-	// stated otherwise.
-	//
-	// `ReturnCode::CalleeReverted`: Output buffer is returned.
-	// `ReturnCode::CalleeTrapped`
-	// `ReturnCode::CodeNotFound`
-	[seal0] seal_delegate_call(
-		ctx,
+	/// Execute code in the context (storage, caller, value) of the current contract.
+	///
+	/// Reentrancy protection is always disabled since the callee is allowed
+	/// to modify the caller's storage. This makes going through a reentrancy attack
+	/// unnecessary for the callee when it wants to exploit the caller.
+	///
+	/// # Parameters
+	///
+	/// - flags: See [`CallFlags`] for documentation of the supported flags.
+	/// - code_hash: a pointer to the hash of the code to be called.
+	/// - input_data_ptr: a pointer to a buffer to be used as input data to the callee.
+	/// - input_data_len: length of the input data buffer.
+	/// - output_ptr: a pointer where the output buffer is copied to.
+	/// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the
+	///   actual length is written to.
+	///
+	/// # Errors
+	///
+	/// An error means that the call wasn't successful and no output buffer is returned unless
+	/// stated otherwise.
+	///
+	/// `ReturnCode::CalleeReverted`: Output buffer is returned.
+	/// `ReturnCode::CalleeTrapped`
+	/// `ReturnCode::CodeNotFound`
+	fn seal_delegate_call(
+		ctx: Runtime<E: Ext>,
 		flags: u32,
 		code_hash_ptr: u32,
 		input_data_ptr: u32,
 		input_data_len: u32,
 		output_ptr: u32,
-		output_len_ptr: u32
-	) -> ReturnCode => {
+		output_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.call(
 			CallFlags::from_bits(flags).ok_or(Error::<E::T>::InvalidCallFlags)?,
-			CallType::DelegateCall{code_hash_ptr},
+			CallType::DelegateCall { code_hash_ptr },
 			input_data_ptr,
 			input_data_len,
 			output_ptr,
 			output_len_ptr,
 		)
-	},
-
-	// Instantiate a contract with the specified code hash.
-	//
-	// # Deprecation
-	//
-	// This is equivalent to calling the newer version of this function. The newer version
-	// drops the now unnecessary length fields.
-	//
-	// # Note
-	//
-	// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes
-	// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards
-	// compatibility. Consider switching to the newest version of this function.
-	[seal0] seal_instantiate(
-		ctx,
+	}
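A contract-side import of `seal_delegate_call` looks like the sketch below; because the callee code runs against the caller's own storage, it behaves like invoking a shared library identified by `code_hash`. The wrapper name and buffer sizes are illustrative:

```rust
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_delegate_call(
		flags: u32, code_hash_ptr: u32,
		input_data_ptr: u32, input_data_len: u32,
		output_ptr: u32, output_len_ptr: u32,
	) -> u32;
}

/// Runs the code identified by `code_hash` in our own storage/caller/value context.
unsafe fn delegate(code_hash: &[u8; 32], input: &[u8], out: &mut [u8]) -> (u32, u32) {
	let mut out_len: u32 = out.len() as u32;
	let ret = seal_delegate_call(
		0, // no flags set
		code_hash.as_ptr() as u32,
		input.as_ptr() as u32,
		input.len() as u32,
		out.as_mut_ptr() as u32,
		&mut out_len as *mut u32 as u32,
	);
	(ret, out_len) // raw return code plus the actual output length
}
```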
+	/// Instantiate a contract with the specified code hash.
+	///
+	/// # Deprecation
+	///
+	/// This is equivalent to calling the newer version of this function. The newer version
+	/// drops the now unnecessary length fields.
+	///
+	/// # Note
+	///
+	/// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes
+	/// of those types are fixed through [`MaxEncodedLen`]. The fields exist for backwards
+	/// compatibility. Consider switching to the newest version of this function.
+	fn seal_instantiate(
+		ctx: Runtime<E: Ext>,
 		code_hash_ptr: u32,
 		_code_hash_len: u32,
 		gas: u64,
@@ -1312,9 +1374,9 @@ define_env!(Env, <E: Ext>,
 		output_ptr: u32,
 		output_len_ptr: u32,
 		salt_ptr: u32,
-		salt_len: u32
-	) -> ReturnCode => {
-		ctx.instantiate (
+		salt_len: u32,
+	) -> Result<ReturnCode, TrapReason> {
+		ctx.instantiate(
 			code_hash_ptr,
 			gas,
 			value_ptr,
@@ -1327,50 +1389,51 @@ define_env!(Env, <E: Ext>,
 			salt_ptr,
 			salt_len,
 		)
-	},
+	}

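The "# Note" on these deprecated variants is worth unpacking: when a type implements `MaxEncodedLen`, the host can derive the read length from the type itself, so the explicit `_len` parameters carry no information. A hedged analogue of that idea using parity-scale-codec (`decode_fixed` is hypothetical, not pallet API):

```rust
use codec::{Decode, MaxEncodedLen};

/// Decodes a `T` from sandbox memory without a caller-supplied length:
/// the upper bound comes from the type itself via `MaxEncodedLen`.
fn decode_fixed<T: Decode + MaxEncodedLen>(memory: &[u8], ptr: usize) -> Option<T> {
	let end = ptr.checked_add(T::max_encoded_len())?;
	let mut buf = memory.get(ptr..end.min(memory.len()))?;
	T::decode(&mut buf).ok()
}
```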
-	// Instantiate a contract with the specified code hash.
-	//
-	// This function creates an account and executes the constructor defined in the code specified
-	// by the code hash. The address of this new account is copied to `address_ptr` and its length
-	// to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its
-	// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by
-	// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`.
-	//
-	// `value` must be at least the minimum balance. Otherwise the instantiation fails and the
-	// contract is not created.
-	//
-	// # Parameters
-	//
-	// - code_hash_ptr: a pointer to the buffer that contains the initializer code.
-	// - gas: how much gas to devote to the execution of the initializer code.
-	// - value_ptr: a pointer to the buffer with value, how much value to send.
-	//   Should be decodable as a `T::Balance`. Traps otherwise.
-	// - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code.
-	// - input_data_len: length of the input data buffer.
-	// - address_ptr: a pointer where the new account's address is copied to.
-	// - address_len_ptr: in-out pointer to where the length of the buffer is read from
-	//   and the actual length is written to.
-	// - output_ptr: a pointer where the output buffer is copied to.
-	// - output_len_ptr: in-out pointer to where the length of the buffer is read from
-	//   and the actual length is written to.
-	// - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`.
-	// - salt_len: length in bytes of the supplied salt.
-	//
-	// # Errors
-	//
-	// Please consult the `ReturnCode` enum declaration for more information on those
-	// errors. Here we only note things specific to this function.
-	//
-	// An error means that the account wasn't created and no address or output buffer
-	// is returned unless stated otherwise.
-	//
-	// `ReturnCode::CalleeReverted`: Output buffer is returned.
-	// `ReturnCode::CalleeTrapped`
-	// `ReturnCode::TransferFailed`
-	// `ReturnCode::CodeNotFound`
-	[seal1] seal_instantiate(
-		ctx,
+	/// Instantiate a contract with the specified code hash.
+	///
+	/// This function creates an account and executes the constructor defined in the code specified
+	/// by the code hash. The address of this new account is copied to `address_ptr` and its length
+	/// to `address_len_ptr`. The constructor's output buffer is copied to `output_ptr` and its
+	/// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by
+	/// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`.
+	///
+	/// `value` must be at least the minimum balance. Otherwise the instantiation fails and the
+	/// contract is not created.
+	///
+	/// # Parameters
+	///
+	/// - code_hash_ptr: a pointer to the buffer that contains the initializer code.
+	/// - gas: how much gas to devote to the execution of the initializer code.
+	/// - value_ptr: a pointer to the buffer with value, how much value to send. Should be
+	///   decodable as a `T::Balance`. Traps otherwise.
+	/// - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code.
+	/// - input_data_len: length of the input data buffer.
+	/// - address_ptr: a pointer where the new account's address is copied to.
+	/// - address_len_ptr: in-out pointer to where the length of the buffer is read from and the
+	///   actual length is written to.
+	/// - output_ptr: a pointer where the output buffer is copied to.
+	/// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the
+	///   actual length is written to.
+	/// - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`.
+	/// - salt_len: length in bytes of the supplied salt.
+	///
+	/// # Errors
+	///
+	/// Please consult the `ReturnCode` enum declaration for more information on those
+	/// errors. Here we only note things specific to this function.
+	///
+	/// An error means that the account wasn't created and no address or output buffer
+	/// is returned unless stated otherwise.
+	///
+	/// `ReturnCode::CalleeReverted`: Output buffer is returned.
+	/// `ReturnCode::CalleeTrapped`
+	/// `ReturnCode::TransferFailed`
+	/// `ReturnCode::CodeNotFound`
+	#[version(1)]
+	fn seal_instantiate(
+		ctx: Runtime<E: Ext>,
 		code_hash_ptr: u32,
 		gas: u64,
 		value_ptr: u32,
@@ -1381,8 +1444,8 @@ define_env!(Env, <E: Ext>,
 		output_ptr: u32,
 		output_len_ptr: u32,
 		salt_ptr: u32,
-		salt_len: u32
-	) -> ReturnCode => {
+		salt_len: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.instantiate(
 			code_hash_ptr,
 			gas,
@@ -1396,54 +1459,59 @@ define_env!(Env, <E: Ext>,
 			salt_ptr,
 			salt_len,
 		)
-	},
+	}

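When a deployer only cares whether instantiation succeeded, both copies can be skipped. A hedged contract-side sketch of the `seal1` variant; it additionally assumes the two length pointers are not read while the corresponding copy is skipped:

```rust
#[link(wasm_import_module = "seal1")]
extern "C" {
	fn seal_instantiate(
		code_hash_ptr: u32, gas: u64, value_ptr: u32,
		input_data_ptr: u32, input_data_len: u32,
		address_ptr: u32, address_len_ptr: u32,
		output_ptr: u32, output_len_ptr: u32,
		salt_ptr: u32, salt_len: u32,
	) -> u32;
}

const SENTINEL: u32 = u32::MAX;

/// Instantiates `code_hash` and reports only the raw `ReturnCode`.
unsafe fn instantiate_fire_and_forget(
	code_hash: &[u8; 32], gas: u64, value: &[u8; 16], input: &[u8], salt: &[u8],
) -> u32 {
	seal_instantiate(
		code_hash.as_ptr() as u32, gas, value.as_ptr() as u32,
		input.as_ptr() as u32, input.len() as u32,
		SENTINEL, SENTINEL, // skip copying the new account's address
		SENTINEL, SENTINEL, // skip copying the constructor's output
		salt.as_ptr() as u32, salt.len() as u32,
	)
}
```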
-	// Remove the calling account and transfer remaining balance.
-	//
-	// # Deprecation
-	//
-	// This is equivalent to calling the newer version of this function. The newer version
-	// drops the now unnecessary length fields.
-	//
-	// # Note
-	//
-	// The value `_beneficiary_len` is ignored because the encoded sizes
-	// this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards
-	// compatibility. Consider switching to the newest version of this function.
-	[seal0] seal_terminate(ctx, beneficiary_ptr: u32, _beneficiary_len: u32) => {
+	/// Remove the calling account and transfer remaining balance.
+	///
+	/// # Deprecation
+	///
+	/// This is equivalent to calling the newer version of this function. The newer version
+	/// drops the now unnecessary length fields.
+	///
+	/// # Note
+	///
+	/// The value `_beneficiary_len` is ignored because the encoded size
+	/// of this type is fixed through [`MaxEncodedLen`]. The field exists for backwards
+	/// compatibility. Consider switching to the newest version of this function.
+	fn seal_terminate(
+		ctx: Runtime<E: Ext>,
+		beneficiary_ptr: u32,
+		_beneficiary_len: u32,
+	) -> Result<(), TrapReason> {
 		ctx.terminate(beneficiary_ptr)
-	},
+	}

-	// Remove the calling account and transfer remaining **free** balance.
-	//
-	// This function never returns. Either the termination was successful and the
-	// execution of the destroyed contract is halted. Or it failed during the termination
-	// which is considered fatal and results in a trap + rollback.
-	//
-	// - beneficiary_ptr: a pointer to the address of the beneficiary account where all
-	//   where all remaining funds of the caller are transferred.
-	//   Should be decodable as an `T::AccountId`. Traps otherwise.
-	//
-	// # Traps
-	//
-	// - The contract is live i.e is already on the call stack.
-	// - Failed to send the balance to the beneficiary.
-	// - The deletion queue is full.
-	[seal1] seal_terminate(ctx, beneficiary_ptr: u32) => {
+	/// Remove the calling account and transfer remaining **free** balance.
+	///
+	/// This function never returns. Either the termination was successful and the
+	/// execution of the destroyed contract is halted. Or it failed during the termination
+	/// which is considered fatal and results in a trap + rollback.
+	///
+	/// - beneficiary_ptr: a pointer to the address of the beneficiary account where all
+	///   remaining funds of the caller are transferred. Should be decodable as a
+	///   `T::AccountId`. Traps otherwise.
+	///
+	/// # Traps
+	///
+	/// - The contract is live, i.e. is already on the call stack.
+	/// - Failed to send the balance to the beneficiary.
+	/// - The deletion queue is full.
+	#[version(1)]
+	fn seal_terminate(ctx: Runtime<E: Ext>, beneficiary_ptr: u32) -> Result<(), TrapReason> {
 		ctx.terminate(beneficiary_ptr)
-	},
+	}

-	// Stores the input passed by the caller into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// # Note
-	//
-	// This function traps if the input was previously forwarded by a `seal_call`.
-	[seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the input passed by the caller into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// # Note
+	///
+	/// This function traps if the input was previously forwarded by a `seal_call`.
+	fn seal_input(ctx: Runtime<E: Ext>, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::InputBase)?;
 		if let Some(input) = ctx.input_data.take() {
 			ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| {
@@ -1454,300 +1522,403 @@ define_env!(Env, <E: Ext>,
 		} else {
 			Err(Error::<E::T>::InputForwarded.into())
 		}
-	},
+	}

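Together with `seal_return`, `seal_input` frames every contract execution. A minimal, hedged raw-wasm entry point; the fixed 1 KiB scratch buffer is an illustrative choice (the call traps if the actual input is larger):

```rust
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_input(out_ptr: u32, out_len_ptr: u32);
}

static mut INPUT_BUF: [u8; 1024] = [0u8; 1024]; // illustrative capacity

#[no_mangle]
pub extern "C" fn call() {
	unsafe {
		// Advertise our capacity; the host overwrites `len` with the input size.
		let mut len: u32 = 1024;
		seal_input(INPUT_BUF.as_mut_ptr() as u32, &mut len as *mut u32 as u32);
		// `INPUT_BUF[..len as usize]` now holds the SCALE encoded call data.
	}
}
```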
-	// Cease contract execution and save a data buffer as a result of the execution.
-	//
-	// This function never returns as it stops execution of the caller.
-	// This is the only way to return a data buffer to the caller. Returning from
-	// execution without calling this function is equivalent to calling:
-	// ```
-	// seal_return(0, 0, 0);
-	// ```
-	//
-	// The flags argument is a bitfield that can be used to signal special return
-	// conditions to the supervisor:
-	// --- lsb ---
-	// bit 0 : REVERT - Revert all storage changes made by the caller.
-	// bit [1, 31]: Reserved for future use.
-	// --- msb ---
-	//
-	// Using a reserved bit triggers a trap.
-	[seal0] seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => {
+	/// Cease contract execution and save a data buffer as a result of the execution.
+	///
+	/// This function never returns as it stops execution of the caller.
+	/// This is the only way to return a data buffer to the caller. Returning from
+	/// execution without calling this function is equivalent to calling:
+	/// ```
+	/// seal_return(0, 0, 0);
+	/// ```
+	///
+	/// The flags argument is a bitfield that can be used to signal special return
+	/// conditions to the supervisor:
+	/// --- lsb ---
+	/// bit 0 : REVERT - Revert all storage changes made by the caller.
+	/// bit [1, 31]: Reserved for future use.
+	/// --- msb ---
+	///
+	/// Using a reserved bit triggers a trap.
+	fn seal_return(
+		ctx: Runtime<E: Ext>,
+		flags: u32,
+		data_ptr: u32,
+		data_len: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Return(data_len))?;
 		Err(TrapReason::Return(ReturnData {
 			flags,
 			data: ctx.read_sandbox_memory(data_ptr, data_len)?,
 		}))
-	},
+	}

-	// Stores the address of the caller into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the
-	// extrinsic will be returned. Otherwise, if this call is initiated by another contract then the
-	// address of the contract will be returned. The value is encoded as T::AccountId.
-	[seal0] seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the address of the caller into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the
+	/// extrinsic will be returned. Otherwise, if this call is initiated by another contract then
+	/// the address of the contract will be returned. The value is encoded as T::AccountId.
+	fn seal_caller(ctx: Runtime<E: Ext>, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Caller)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.caller().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Checks whether a specified address belongs to a contract.
-	//
-	// # Parameters
-	//
-	// - account_ptr: a pointer to the address of the beneficiary account
-	//   Should be decodable as an `T::AccountId`. Traps otherwise.
-	//
-	// Returned value is a u32-encoded boolean: (0 = false, 1 = true).
-	[seal0] seal_is_contract(ctx, account_ptr: u32) -> u32 => {
+	/// Checks whether a specified address belongs to a contract.
+	///
+	/// # Parameters
+	///
+	/// - account_ptr: a pointer to the address of the account in question. Should be decodable as
+	///   a `T::AccountId`. Traps otherwise.
+	///
+	/// Returned value is a u32-encoded boolean: (0 = false, 1 = true).
+	fn seal_is_contract(ctx: Runtime<E: Ext>, account_ptr: u32) -> Result<u32, TrapReason> {
 		ctx.charge_gas(RuntimeCosts::IsContract)?;
 		let address: <<E as Ext>::T as frame_system::Config>::AccountId =
 			ctx.read_sandbox_memory_as(account_ptr)?;
 		Ok(ctx.ext.is_contract(&address) as u32)
-	},
+	}

-	// Retrieve the code hash for a specified contract address.
-	//
-	// # Parameters
-	//
-	// - `account_ptr`: a pointer to the address in question.
-	//   Should be decodable as an `T::AccountId`. Traps otherwise.
-	// - `out_ptr`: pointer to the linear memory where the returning value is written to.
-	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
-	//   is read from and the value length is written to.
-	//
-	// # Errors
-	//
-	// `ReturnCode::KeyNotFound`
-	[seal0] seal_code_hash(ctx, account_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
+	/// Retrieve the code hash for a specified contract address.
+	///
+	/// # Parameters
+	///
+	/// - `account_ptr`: a pointer to the address in question. Should be decodable as a
+	///   `T::AccountId`. Traps otherwise.
+	/// - `out_ptr`: pointer to the linear memory where the returning value is written to.
+	/// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and
+	///   the value length is written to.
+	///
+	/// # Errors
+	///
+	/// `ReturnCode::KeyNotFound`
+	fn seal_code_hash(
+		ctx: Runtime<E: Ext>,
+		account_ptr: u32,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
 		ctx.charge_gas(RuntimeCosts::CodeHash)?;
 		let address: <<E as Ext>::T as frame_system::Config>::AccountId =
 			ctx.read_sandbox_memory_as(account_ptr)?;
 		if let Some(value) = ctx.ext.code_hash(&address) {
-			ctx.write_sandbox_output(out_ptr, out_len_ptr, &value.encode(), false, already_charged)?;
+			ctx.write_sandbox_output(
+				out_ptr,
+				out_len_ptr,
+				&value.encode(),
+				false,
+				already_charged,
+			)?;
 			Ok(ReturnCode::Success)
 		} else {
 			Ok(ReturnCode::KeyNotFound)
 		}
-	},
+	}

-	// Retrieve the code hash of the currently executing contract.
-	//
-	// # Parameters
-	//
-	// - `out_ptr`: pointer to the linear memory where the returning value is written to.
-	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
-	//   is read from and the value length is written to.
-	[seal0] seal_own_code_hash(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Retrieve the code hash of the currently executing contract.
+	///
+	/// # Parameters
+	///
+	/// - `out_ptr`: pointer to the linear memory where the returning value is written to.
+	/// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and
+	///   the value length is written to.
+	fn seal_own_code_hash(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::OwnCodeHash)?;
 		let code_hash_encoded = &ctx.ext.own_code_hash().encode();
-		Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, code_hash_encoded, false, already_charged)?)
-	},
+		Ok(ctx.write_sandbox_output(
+			out_ptr,
+			out_len_ptr,
+			code_hash_encoded,
+			false,
+			already_charged,
+		)?)
+	}

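The pair `seal_code_hash`/`seal_own_code_hash` enables, for instance, checking whether another account runs the same code as the executing contract. A hedged sketch (`same_code` is hypothetical; hashes assumed to be 32 bytes):

```rust
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_code_hash(account_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> u32;
	fn seal_own_code_hash(out_ptr: u32, out_len_ptr: u32);
}

/// Returns true if `account` is a contract running the same code as us.
unsafe fn same_code(account: &[u8; 32]) -> bool {
	let (mut theirs, mut ours) = ([0u8; 32], [0u8; 32]);
	let mut len: u32 = 32;
	// Non-zero means `ReturnCode::KeyNotFound`: not a contract at all.
	let ret = seal_code_hash(
		account.as_ptr() as u32,
		theirs.as_mut_ptr() as u32,
		&mut len as *mut u32 as u32,
	);
	if ret != 0 {
		return false
	}
	let mut len: u32 = 32;
	seal_own_code_hash(ours.as_mut_ptr() as u32, &mut len as *mut u32 as u32);
	theirs == ours
}
```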
-	// Checks whether the caller of the current contract is the origin of the whole call stack.
-	//
-	// Prefer this over `seal_is_contract` when checking whether your contract is being called by a contract
-	// or a plain account. The reason is that it performs better since it does not need to
-	// do any storage lookups.
-	//
-	// A return value of`true` indicates that this contract is being called by a plain account
-	// and `false` indicates that the caller is another contract.
-	//
-	// Returned value is a u32-encoded boolean: (0 = false, 1 = true).
-	[seal0] seal_caller_is_origin(ctx) -> u32 => {
+	/// Checks whether the caller of the current contract is the origin of the whole call stack.
+	///
+	/// Prefer this over `seal_is_contract` when checking whether your contract is being called by
+	/// a contract or a plain account. The reason is that it performs better since it does not need
+	/// to do any storage lookups.
+	///
+	/// A return value of `true` indicates that this contract is being called by a plain account
+	/// and `false` indicates that the caller is another contract.
+	///
+	/// Returned value is a u32-encoded boolean: (0 = false, 1 = true).
+	fn seal_caller_is_origin(ctx: Runtime<E: Ext>) -> Result<u32, TrapReason> {
 		ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?;
 		Ok(ctx.ext.caller_is_origin() as u32)
-	},
+	}

-	// Stores the address of the current contract into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	[seal0] seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the address of the current contract into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	fn seal_address(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Address)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.address().encode(),
+			false,
+			already_charged,
		)?)
-	},
+	}

-	// Stores the price for the specified amount of gas into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as T::Balance.
-	//
-	// # Note
-	//
-	// It is recommended to avoid specifying very small values for `gas` as the prices for a single
-	// gas can be smaller than one.
-	[seal0] seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the price for the specified amount of gas into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as T::Balance.
+	///
+	/// # Note
+	///
+	/// It is recommended to avoid specifying very small values for `gas` as the price for a single
+	/// gas can be smaller than one.
+	fn seal_weight_to_fee(
+		ctx: Runtime<E: Ext>,
+		gas: u64,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
+		let gas = Weight::from_ref_time(gas);
 		ctx.charge_gas(RuntimeCosts::WeightToFee)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.get_weight_price(gas).encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores the amount of gas left into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as Gas.
-	[seal0] seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the amount of gas left into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as Gas.
+	fn seal_gas_left(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::GasLeft)?;
 		let gas_left = &ctx.ext.gas_meter().gas_left().encode();
-		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, gas_left, false, already_charged,
-		)?)
-	},
+		Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, gas_left, false, already_charged)?)
+	}

-	// Stores the **free* balance of the current account into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as T::Balance.
-	[seal0] seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the **free** balance of the current account into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as T::Balance.
+	fn seal_balance(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Balance)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.balance().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores the value transferred along with this call/instantiate into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as T::Balance.
-	[seal0] seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the value transferred along with this call/instantiate into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as T::Balance.
+	fn seal_value_transferred(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::ValueTransferred)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.value_transferred().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores a random number for the current block and the given subject into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as T::Hash.
-	//
-	// # Deprecation
-	//
-	// This function is deprecated. Users should migrate to the version in the "seal1" module.
-	[seal0] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores a random number for the current block and the given subject into the supplied
+	/// buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as T::Hash.
+	///
+	/// # Deprecation
+	///
+	/// This function is deprecated. Users should migrate to the version in the "seal1" module.
+	fn seal_random(
+		ctx: Runtime<E: Ext>,
+		subject_ptr: u32,
+		subject_len: u32,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Random)?;
 		if subject_len > ctx.ext.schedule().limits.subject_len {
-			return Err(Error::<E::T>::RandomSubjectTooLong.into());
+			return Err(Error::<E::T>::RandomSubjectTooLong.into())
 		}
 		let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).0.encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.random(&subject_buf).0.encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores a random number for the current block and the given subject into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// The data is encoded as (T::Hash, T::BlockNumber).
-	//
-	// # Changes from v0
-	//
-	// In addition to the seed it returns the block number since which it was determinable
-	// by chain observers.
-	//
-	// # Note
-	//
-	// The returned seed should only be used to distinguish commitments made before
-	// the returned block number. If the block number is too early (i.e. commitments were
-	// made afterwards), then ensure no further commitments may be made and repeatedly
-	// call this on later blocks until the block number returned is later than the latest
-	// commitment.
-	[seal1] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores a random number for the current block and the given subject into the supplied
+	/// buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// The data is encoded as (T::Hash, T::BlockNumber).
+	///
+	/// # Changes from v0
+	///
+	/// In addition to the seed it returns the block number since which it was determinable
+	/// by chain observers.
+	///
+	/// # Note
+	///
+	/// The returned seed should only be used to distinguish commitments made before
+	/// the returned block number. If the block number is too early (i.e. commitments were
+	/// made afterwards), then ensure no further commitments may be made and repeatedly
+	/// call this on later blocks until the block number returned is later than the latest
+	/// commitment.
+	#[version(1)]
+	fn seal_random(
+		ctx: Runtime<E: Ext>,
+		subject_ptr: u32,
+		subject_len: u32,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Random)?;
 		if subject_len > ctx.ext.schedule().limits.subject_len {
-			return Err(Error::<E::T>::RandomSubjectTooLong.into());
+			return Err(Error::<E::T>::RandomSubjectTooLong.into())
 		}
 		let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.random(&subject_buf).encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Load the latest block timestamp into the supplied buffer
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	[seal0] seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Load the latest block timestamp into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	fn seal_now(ctx: Runtime<E: Ext>, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Now)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.now().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer.
-	//
-	// The data is encoded as T::Balance.
-	[seal0] seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer.
+	///
+	/// The data is encoded as T::Balance.
+	fn seal_minimum_balance(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::MinimumBalance)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.minimum_balance().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores the tombstone deposit into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	//
-	// # Deprecation
-	//
-	// There is no longer a tombstone deposit. This function always returns 0.
-	[seal0] seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the tombstone deposit into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	///
+	/// # Deprecation
+	///
+	/// There is no longer a tombstone deposit. This function always returns 0.
+	fn seal_tombstone_deposit(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Balance)?;
 		let deposit = <BalanceOf<E::T>>::zero().encode();
 		Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, &deposit, false, already_charged)?)
-	},
+	}

-	// Was used to restore the given destination contract sacrificing the caller.
-	//
-	// # Note
-	//
-	// The state rent functionality was removed. This is stub only exists for
-	// backwards compatiblity
-	[seal0] seal_restore_to(
-		ctx,
+	/// Was used to restore the given destination contract sacrificing the caller.
+	///
+	/// # Note
+	///
+	/// The state rent functionality was removed. This stub only exists for
+	/// backwards compatibility.
+	fn seal_restore_to(
+		ctx: Runtime<E: Ext>,
 		_dest_ptr: u32,
 		_dest_len: u32,
 		_code_hash_ptr: u32,
@@ -1755,45 +1926,46 @@ define_env!(Env, <E: Ext>,
 		_rent_allowance_ptr: u32,
 		_rent_allowance_len: u32,
 		_delta_ptr: u32,
-		_delta_count: u32
-	) => {
+		_delta_count: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::DebugMessage)?;
 		Ok(())
-	},
+	}

-	// Was used to restore the given destination contract sacrificing the caller.
-	//
-	// # Note
-	//
-	// The state rent functionality was removed. This is stub only exists for
-	// backwards compatiblity
-	[seal1] seal_restore_to(
-		ctx,
+	/// Was used to restore the given destination contract sacrificing the caller.
+	///
+	/// # Note
+	///
+	/// The state rent functionality was removed. This stub only exists for
+	/// backwards compatibility.
+	#[version(1)]
+	fn seal_restore_to(
+		ctx: Runtime<E: Ext>,
 		_dest_ptr: u32,
 		_code_hash_ptr: u32,
 		_rent_allowance_ptr: u32,
 		_delta_ptr: u32,
-		_delta_count: u32
-	) => {
+		_delta_count: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::DebugMessage)?;
 		Ok(())
-	},
+	}

-	// Deposit a contract event with the data buffer and optional list of topics. There is a limit
-	// on the maximum number of topics specified by `event_topics`.
-	//
-	// - topics_ptr - a pointer to the buffer of topics encoded as `Vec<T::Hash>`. The value of this
-	//   is ignored if `topics_len` is set to 0. The topics list can't contain duplicates.
-	// - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector.
-	// - data_ptr - a pointer to a raw data buffer which will saved along the event.
-	// - data_len - the length of the data buffer.
-	[seal0] seal_deposit_event(
-		ctx,
+	/// Deposit a contract event with the data buffer and optional list of topics. There is a limit
+	/// on the maximum number of topics specified by `event_topics`.
+	///
+	/// - topics_ptr - a pointer to the buffer of topics encoded as `Vec<T::Hash>`. The value of
+	///   this is ignored if `topics_len` is set to 0. The topics list can't contain duplicates.
+	/// - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector.
+	/// - data_ptr - a pointer to a raw data buffer which will be saved along with the event.
+	/// - data_len - the length of the data buffer.
+	fn seal_deposit_event(
+		ctx: Runtime<E: Ext>,
 		topics_ptr: u32,
 		topics_len: u32,
 		data_ptr: u32,
-		data_len: u32
-	) => {
+		data_len: u32,
+	) -> Result<(), TrapReason> {
 		fn has_duplicates<T: Ord>(items: &mut Vec<T>) -> bool {
 			// # Warning
 			//
@@ -1801,40 +1973,35 @@ define_env!(Env, <E: Ext>,
 			// because we are rejecting duplicates which removes the non determinism.
 			items.sort_unstable();
 			// Find any two consecutive equal elements.
-			items.windows(2).any(|w| {
-				match &w {
-					&[a, b] => a == b,
-					_ => false,
-				}
+			items.windows(2).any(|w| match &w {
+				&[a, b] => a == b,
+				_ => false,
 			})
 		}

 		let num_topic = topics_len
 			.checked_div(sp_std::mem::size_of::<TopicOf<E::T>>() as u32)
 			.ok_or("Zero sized topics are not allowed")?;
-		ctx.charge_gas(RuntimeCosts::DepositEvent {
-			num_topic,
-			len: data_len,
-		})?;
+		ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?;
 		if data_len > ctx.ext.max_value_size() {
-			return Err(Error::<E::T>::ValueTooLarge.into());
+			return Err(Error::<E::T>::ValueTooLarge.into())
 		}

-		let mut topics: Vec::<TopicOf<<E as Ext>::T>> = match topics_len {
+		let mut topics: Vec<TopicOf<<E as Ext>::T>> = match topics_len {
 			0 => Vec::new(),
 			_ => ctx.read_sandbox_memory_as_unbounded(topics_ptr, topics_len)?,
 		};

 		// If there are more than `event_topics`, then trap.
 		if topics.len() > ctx.ext.schedule().limits.event_topics as usize {
-			return Err(Error::<E::T>::TooManyTopics.into());
+			return Err(Error::<E::T>::TooManyTopics.into())
 		}

 		// Check for duplicate topics. If there are any, then trap.
 		// Complexity O(n * log(n)) and no additional allocations.
 		// This also sorts the topics.
 		if has_duplicates(&mut topics) {
-			return Err(Error::<E::T>::DuplicateTopics.into());
+			return Err(Error::<E::T>::DuplicateTopics.into())
 		}

 		let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?;
@@ -1842,262 +2009,303 @@ define_env!(Env, <E: Ext>,
 		ctx.ext.deposit_event(topics, event_data);

 		Ok(())
-	},
+	}

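The inner `has_duplicates` is the interesting detail here: sorting first makes any duplicates adjacent, so a single pass over two-element windows suffices, at O(n log n) with no extra allocation. Extracted as a standalone, runnable version of the same idea:

```rust
fn has_duplicates<T: Ord>(items: &mut Vec<T>) -> bool {
	// Sorting makes equal elements neighbours ...
	items.sort_unstable();
	// ... so one linear scan over consecutive pairs finds any duplicate.
	items.windows(2).any(|w| w[0] == w[1])
}

fn main() {
	assert!(has_duplicates(&mut vec![3, 1, 3]));
	assert!(!has_duplicates(&mut vec![1, 2, 3]));
}
```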
-	// Was used to set rent allowance of the contract.
-	//
-	// # Note
-	//
-	// The state rent functionality was removed. This is stub only exists for
-	// backwards compatiblity.
-	[seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => {
+	/// Was used to set rent allowance of the contract.
+	///
+	/// # Note
+	///
+	/// The state rent functionality was removed. This stub only exists for
+	/// backwards compatibility.
+	fn seal_set_rent_allowance(
+		ctx: Runtime<E: Ext>,
+		_value_ptr: u32,
+		_value_len: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::DebugMessage)?;
 		Ok(())
-	},
+	}

-	// Was used to set rent allowance of the contract.
-	//
-	// # Note
-	//
-	// The state rent functionality was removed. This is stub only exists for
-	// backwards compatiblity.
-	[seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => {
+	/// Was used to set rent allowance of the contract.
+	///
+	/// # Note
+	///
+	/// The state rent functionality was removed. This stub only exists for
+	/// backwards compatibility.
+	#[version(1)]
+	fn seal_set_rent_allowance(ctx: Runtime<E: Ext>, _value_ptr: u32) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::DebugMessage)?;
 		Ok(())
-	},
+	}

-	// Was used to store the rent allowance into the supplied buffer.
-	//
-	// # Note
-	//
-	// The state rent functionality was removed. This is stub only exists for
-	// backwards compatiblity.
-	[seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Was used to store the rent allowance into the supplied buffer.
+	///
+	/// # Note
+	///
+	/// The state rent functionality was removed. This stub only exists for
+	/// backwards compatibility.
+	fn seal_rent_allowance(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::Balance)?;
 		let rent_allowance = <BalanceOf<E::T>>::max_value().encode();
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &rent_allowance, false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&rent_allowance,
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Stores the current block number of the current contract into the supplied buffer.
-	//
-	// The value is stored to linear memory at the address pointed to by `out_ptr`.
-	// `out_len_ptr` must point to a u32 value that describes the available space at
-	// `out_ptr`. This call overwrites it with the size of the value. If the available
-	// space at `out_ptr` is less than the size of the value a trap is triggered.
-	[seal0] seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => {
+	/// Stores the current block number of the current contract into the supplied buffer.
+	///
+	/// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	/// `out_len_ptr` must point to a u32 value that describes the available space at
+	/// `out_ptr`. This call overwrites it with the size of the value. If the available
+	/// space at `out_ptr` is less than the size of the value a trap is triggered.
+	fn seal_block_number(
+		ctx: Runtime<E: Ext>,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::BlockNumber)?;
 		Ok(ctx.write_sandbox_output(
-			out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged
+			out_ptr,
+			out_len_ptr,
+			&ctx.ext.block_number().encode(),
+			false,
+			already_charged,
 		)?)
-	},
+	}

-	// Computes the SHA2 256-bit hash on the given input buffer.
-	//
-	// Returns the result directly into the given output buffer.
-	//
-	// # Note
-	//
-	// - The `input` and `output` buffer may overlap.
-	// - The output buffer is expected to hold at least 32 bytes (256 bits).
-	// - It is the callers responsibility to provide an output buffer that
-	//   is large enough to hold the expected amount of bytes returned by the
-	//   chosen hash function.
-	//
-	// # Parameters
-	//
-	// - `input_ptr`: the pointer into the linear memory where the input
-	//   data is placed.
-	// - `input_len`: the length of the input data in bytes.
-	// - `output_ptr`: the pointer into the linear memory where the output
-	//   data is placed. The function will write the result
-	//   directly into this buffer.
-	[seal0] seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => {
+	/// Computes the SHA2 256-bit hash on the given input buffer.
+	///
+	/// Returns the result directly into the given output buffer.
+	///
+	/// # Note
+	///
+	/// - The `input` and `output` buffer may overlap.
+	/// - The output buffer is expected to hold at least 32 bytes (256 bits).
+	/// - It is the caller's responsibility to provide an output buffer that is large enough to
+	///   hold the expected amount of bytes returned by the chosen hash function.
+	///
+	/// # Parameters
+	///
+	/// - `input_ptr`: the pointer into the linear memory where the input data is placed.
+	/// - `input_len`: the length of the input data in bytes.
+	/// - `output_ptr`: the pointer into the linear memory where the output data is placed. The
+	///   function will write the result directly into this buffer.
+	fn seal_hash_sha2_256(
+		ctx: Runtime<E: Ext>,
+		input_ptr: u32,
+		input_len: u32,
+		output_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::HashSha256(input_len))?;
 		Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?)
-	},
+	}

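Unlike the storage getters, the hash functions carry no length pointer: the output size is fixed by the algorithm itself. A hedged contract-side wrapper for the SHA2-256 variant (the same shape applies to the KECCAK and BLAKE2 functions that follow):

```rust
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_hash_sha2_256(input_ptr: u32, input_len: u32, output_ptr: u32);
}

/// The 32-byte output size is a property of SHA2-256, so no in-out length is needed.
unsafe fn sha2_256(data: &[u8]) -> [u8; 32] {
	let mut out = [0u8; 32];
	seal_hash_sha2_256(data.as_ptr() as u32, data.len() as u32, out.as_mut_ptr() as u32);
	out
}
```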
-	// Computes the KECCAK 256-bit hash on the given input buffer.
-	//
-	// Returns the result directly into the given output buffer.
-	//
-	// # Note
-	//
-	// - The `input` and `output` buffer may overlap.
-	// - The output buffer is expected to hold at least 32 bytes (256 bits).
-	// - It is the callers responsibility to provide an output buffer that
-	//   is large enough to hold the expected amount of bytes returned by the
-	//   chosen hash function.
-	//
-	// # Parameters
-	//
-	// - `input_ptr`: the pointer into the linear memory where the input
-	//   data is placed.
-	// - `input_len`: the length of the input data in bytes.
-	// - `output_ptr`: the pointer into the linear memory where the output
-	//   data is placed. The function will write the result
-	//   directly into this buffer.
-	[seal0] seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => {
+	/// Computes the KECCAK 256-bit hash on the given input buffer.
+	///
+	/// Returns the result directly into the given output buffer.
+	///
+	/// # Note
+	///
+	/// - The `input` and `output` buffer may overlap.
+	/// - The output buffer is expected to hold at least 32 bytes (256 bits).
+	/// - It is the caller's responsibility to provide an output buffer that is large enough to
+	///   hold the expected amount of bytes returned by the chosen hash function.
+	///
+	/// # Parameters
+	///
+	/// - `input_ptr`: the pointer into the linear memory where the input data is placed.
+	/// - `input_len`: the length of the input data in bytes.
+	/// - `output_ptr`: the pointer into the linear memory where the output data is placed. The
+	///   function will write the result directly into this buffer.
+	fn seal_hash_keccak_256(
+		ctx: Runtime<E: Ext>,
+		input_ptr: u32,
+		input_len: u32,
+		output_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::HashKeccak256(input_len))?;
 		Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?)
-	},
+	}

-	// Computes the BLAKE2 256-bit hash on the given input buffer.
-	//
-	// Returns the result directly into the given output buffer.
-	//
-	// # Note
-	//
-	// - The `input` and `output` buffer may overlap.
-	// - The output buffer is expected to hold at least 32 bytes (256 bits).
-	// - It is the callers responsibility to provide an output buffer that
-	//   is large enough to hold the expected amount of bytes returned by the
-	//   chosen hash function.
-	//
-	// # Parameters
-	//
-	// - `input_ptr`: the pointer into the linear memory where the input
-	//   data is placed.
-	// - `input_len`: the length of the input data in bytes.
-	// - `output_ptr`: the pointer into the linear memory where the output
-	//   data is placed. The function will write the result
-	//   directly into this buffer.
-	[seal0] seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => {
+	/// Computes the BLAKE2 256-bit hash on the given input buffer.
+	///
+	/// Returns the result directly into the given output buffer.
+	///
+	/// # Note
+	///
+	/// - The `input` and `output` buffer may overlap.
+	/// - The output buffer is expected to hold at least 32 bytes (256 bits).
+	/// - It is the caller's responsibility to provide an output buffer that is large enough to
+	///   hold the expected amount of bytes returned by the chosen hash function.
+	///
+	/// # Parameters
+	///
+	/// - `input_ptr`: the pointer into the linear memory where the input data is placed.
+	/// - `input_len`: the length of the input data in bytes.
+	/// - `output_ptr`: the pointer into the linear memory where the output data is placed. The
+	///   function will write the result directly into this buffer.
+	fn seal_hash_blake2_256(
+		ctx: Runtime<E: Ext>,
+		input_ptr: u32,
+		input_len: u32,
+		output_ptr: u32,
+	) -> Result<(), TrapReason> {
 		ctx.charge_gas(RuntimeCosts::HashBlake256(input_len))?;
 		Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?)
-	},
+	}

-	// Computes the BLAKE2 128-bit hash on the given input buffer.
-	//
-	// Returns the result directly into the given output buffer.
-	//
-	// # Note
-	//
-	// - The `input` and `output` buffer may overlap.
-	// - The output buffer is expected to hold at least 16 bytes (128 bits).
-	// - It is the callers responsibility to provide an output buffer that
-	//   is large enough to hold the expected amount of bytes returned by the
-	//   chosen hash function.
-	//
-	// # Parameters
-	//
-	// - `input_ptr`: the pointer into the linear memory where the input
-	//   data is placed.
-	// - `input_len`: the length of the input data in bytes.
-	// - `output_ptr`: the pointer into the linear memory where the output
-	//   data is placed. The function will write the result
-	//   directly into this buffer.
-	[seal0] seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => {
+	/// Computes the BLAKE2 128-bit hash on the given input buffer.
+	///
+	/// Returns the result directly into the given output buffer.
+	///
+	/// # Note
+	///
+	/// - The `input` and `output` buffer may overlap.
+	/// - The output buffer is expected to hold at least 16 bytes (128 bits).
+ /// - It is the caller's responsibility to provide an output buffer that is large enough to hold
+ /// the expected number of bytes returned by the chosen hash function.
+ ///
+ /// # Parameters
+ ///
+ /// - `input_ptr`: the pointer into the linear memory where the input data is placed.
+ /// - `input_len`: the length of the input data in bytes.
+ /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The
+ /// function will write the result directly into this buffer.
+ fn seal_hash_blake2_128(
+ ctx: Runtime<E: Ext>,
+ input_ptr: u32,
+ input_len: u32,
+ output_ptr: u32,
+ ) -> Result<(), TrapReason> {
 ctx.charge_gas(RuntimeCosts::HashBlake128(input_len))?;
 Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?)
- },
+ }

- // Call into the chain extension provided by the chain if any.
- //
- // Handling of the input values is up to the specific chain extension and so is the
- // return value. The extension can decide to use the inputs as primitive inputs or as
- // in/out arguments by interpreting them as pointers. Any caller of this function
- // must therefore coordinate with the chain that it targets.
- //
- // # Note
- //
- // If no chain extension exists the contract will trap with the `NoChainExtension`
- // module error.
- [seal0] seal_call_chain_extension(
- ctx,
- func_id: u32,
+ /// Call into the chain extension provided by the chain, if any.
+ ///
+ /// Handling of the input values is up to the specific chain extension and so is the
+ /// return value. The extension can decide to use the inputs as primitive inputs or as
+ /// in/out arguments by interpreting them as pointers. Any caller of this function
+ /// must therefore coordinate with the chain that it targets.
+ ///
+ /// # Note
+ ///
+ /// If no chain extension exists the contract will trap with the `NoChainExtension`
+ /// module error.
+ fn seal_call_chain_extension(
+ ctx: Runtime<E: Ext>,
+ id: u32,
 input_ptr: u32,
 input_len: u32,
 output_ptr: u32,
- output_len_ptr: u32
- ) -> u32 => {
+ output_len_ptr: u32,
+ ) -> Result<u32, TrapReason> {
 use crate::chain_extension::{ChainExtension, Environment, RetVal};
 if !<E::T as Config>::ChainExtension::enabled() {
- return Err(Error::<E::T>::NoChainExtension.into());
+ return Err(Error::<E::T>::NoChainExtension.into())
 }
- let env = Environment::new(ctx, input_ptr, input_len, output_ptr, output_len_ptr);
- match <E::T as Config>::ChainExtension::call(func_id, env)? {
+ let mut chain_extension = ctx.chain_extension.take().expect(
+ "Constructor initializes with `Some`. This is the only place where it is set to `None`.\
+ It is always reset to `Some` afterwards. qed"
+ );
+ let env = Environment::new(ctx, id, input_ptr, input_len, output_ptr, output_len_ptr);
+ let ret = match chain_extension.call(env)? {
 RetVal::Converging(val) => Ok(val),
- RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData {
- flags: flags.bits(),
- data,
- })),
- }
- },
+ RetVal::Diverging { flags, data } =>
+ Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })),
+ };
+ ctx.chain_extension = Some(chain_extension);
+ ret
+ }
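// Illustrative runtime-side counterpart, not part of this patch: the pallet now
// stores a chain extension *instance* and calls it through `&mut self`, so an
// extension can keep state across calls within one execution. `MyExtension`, the
// func-id dispatch, and the `env.func_id()` accessor are assumptions; consult the
// `chain_extension` module for the exact `Environment` API and error plumbing.
use pallet_contracts::chain_extension::{ChainExtension, Environment, Ext, InitState, RetVal};
use pallet_contracts_primitives::ReturnFlags;
use sp_runtime::DispatchError;
use sp_std::vec::Vec;

#[derive(Default)]
pub struct MyExtension;

impl<C: pallet_contracts::Config> ChainExtension<C> for MyExtension {
	fn call<E: Ext<T = C>>(
		&mut self,
		env: Environment<E, InitState>,
	) -> Result<RetVal, DispatchError> {
		match env.func_id() {
			// Converging: execution continues and `0` becomes the host call's
			// return value, as in the `RetVal::Converging(val) => Ok(val)` arm above.
			1 => Ok(RetVal::Converging(0)),
			// Diverging: the contract returns immediately with these flags and data,
			// surfaced through `TrapReason::Return` above.
			_ => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: Vec::new() }),
		}
	}
}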
- // Emit a custom debug message.
- //
- // No newlines are added to the supplied message.
- // Specifying invalid UTF-8 triggers a trap.
- //
- // This is a no-op if debug message recording is disabled which is always the case
- // when the code is executing on-chain. The message is interpreted as UTF-8 and
- // appended to the debug buffer which is then supplied to the calling RPC client.
- //
- // # Note
- //
- // Even though no action is taken when debug message recording is disabled there is still
- // a non trivial overhead (and weight cost) associated with calling this function. Contract
- // languages should remove calls to this function (either at runtime or compile time) when
- // not being executed as an RPC. For example, they could allow users to disable logging
- // through compile time flags (cargo features) for on-chain deployment. Additionally, the
- // return value of this function can be cached in order to prevent further calls at runtime.
- [seal0] seal_debug_message(ctx, str_ptr: u32, str_len: u32) -> ReturnCode => {
+ /// Emit a custom debug message.
+ ///
+ /// No newlines are added to the supplied message.
+ /// Specifying invalid UTF-8 triggers a trap.
+ ///
+ /// This is a no-op if debug message recording is disabled, which is always the case
+ /// when the code is executing on-chain. The message is interpreted as UTF-8 and
+ /// appended to the debug buffer, which is then supplied to the calling RPC client.
+ ///
+ /// # Note
+ ///
+ /// Even though no action is taken when debug message recording is disabled, there is still
+ /// a non-trivial overhead (and weight cost) associated with calling this function. Contract
+ /// languages should remove calls to this function (either at runtime or compile time) when
+ /// not being executed as an RPC. For example, they could allow users to disable logging
+ /// through compile-time flags (cargo features) for on-chain deployment. Additionally, the
+ /// return value of this function can be cached in order to prevent further calls at runtime.
+ fn seal_debug_message(
+ ctx: Runtime<E: Ext>,
+ str_ptr: u32,
+ str_len: u32,
+ ) -> Result<ReturnCode, TrapReason> {
 ctx.charge_gas(RuntimeCosts::DebugMessage)?;
 if ctx.ext.append_debug_buffer("") {
 let data = ctx.read_sandbox_memory(str_ptr, str_len)?;
- let msg = core::str::from_utf8(&data)
- .map_err(|_| <Error<E::T>>::DebugMessageInvalidUTF8)?;
+ let msg =
+ core::str::from_utf8(&data).map_err(|_| <Error<E::T>>::DebugMessageInvalidUTF8)?;
 ctx.ext.append_debug_buffer(msg);
- return Ok(ReturnCode::Success);
+ return Ok(ReturnCode::Success)
 }
 Ok(ReturnCode::LoggingDisabled)
- },
+ }
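// Illustrative only: one way a contract could follow the advice above and strip
// debug calls at compile time. The `debug-output` feature name and the wrapper
// are assumptions, not part of this patch.
#[link(wasm_import_module = "seal0")]
extern "C" {
	fn seal_debug_message(str_ptr: u32, str_len: u32) -> u32;
}

#[cfg(feature = "debug-output")]
fn debug_log(msg: &str) {
	// The return value (Success/LoggingDisabled) could be cached to skip later calls.
	let _ = unsafe { seal_debug_message(msg.as_ptr() as u32, msg.len() as u32) };
}

#[cfg(not(feature = "debug-output"))]
fn debug_log(_msg: &str) {} // Compiled out for on-chain builds: no call, no weight cost.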
- // Call some dispatchable of the runtime.
- //
- // This function decodes the passed in data as the overarching `Call` type of the
- // runtime and dispatches it. The weight as specified in the runtime is charged
- // from the gas meter. Any weight refunds made by the dispatchable are considered.
- //
- // The filter specified by `Config::CallFilter` is attached to the origin of
- // the dispatched call.
- //
- // # Parameters
- //
- // - `input_ptr`: the pointer into the linear memory where the input data is placed.
- // - `input_len`: the length of the input data in bytes.
- //
- // # Return Value
- //
- // Returns `ReturnCode::Success` when the dispatchable was succesfully executed and
- // returned `Ok`. When the dispatchable was exeuted but returned an error
- // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not
- // provided because it is not guaranteed to be stable.
- //
- // # Comparison with `ChainExtension`
- //
- // Just as a chain extension this API allows the runtime to extend the functionality
- // of contracts. While making use of this function is generelly easier it cannot be
- // used in call cases. Consider writing a chain extension if you need to do perform
- // one of the following tasks:
- //
- // - Return data.
- // - Provide functionality **exclusively** to contracts.
- // - Provide custom weights.
- // - Avoid the need to keep the `Call` data structure stable.
- //
- // # Unstable
- //
- // This function is unstable and subject to change (or removal) in the future. Do not
- // deploy a contract using it to a production chain.
- [__unstable__] seal_call_runtime(ctx, call_ptr: u32, call_len: u32) -> ReturnCode => {
+ /// Call some dispatchable of the runtime.
+ ///
+ /// This function decodes the passed-in data as the overarching `Call` type of the
+ /// runtime and dispatches it. The weight as specified in the runtime is charged
+ /// from the gas meter. Any weight refunds made by the dispatchable are considered.
+ ///
+ /// The filter specified by `Config::CallFilter` is attached to the origin of
+ /// the dispatched call.
+ ///
+ /// # Parameters
+ ///
+ /// - `call_ptr`: the pointer into the linear memory where the input data is placed.
+ /// - `call_len`: the length of the input data in bytes.
+ ///
+ /// # Return Value
+ ///
+ /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and
+ /// returned `Ok`. When the dispatchable was executed but returned an error,
+ /// `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not
+ /// provided because it is not guaranteed to be stable.
+ ///
+ /// # Comparison with `ChainExtension`
+ ///
+ /// Just like a chain extension, this API allows the runtime to extend the functionality
+ /// of contracts. While making use of this function is generally easier, it cannot be
+ /// used in all cases. Consider writing a chain extension if you need to perform
+ /// one of the following tasks:
+ ///
+ /// - Return data.
+ /// - Provide functionality **exclusively** to contracts.
+ /// - Provide custom weights.
+ /// - Avoid the need to keep the `Call` data structure stable.
+ ///
+ /// # Unstable
+ ///
+ /// This function is unstable and subject to change (or removal) in the future. Do not
+ /// deploy a contract using it to a production chain.
+ #[unstable]
+ fn seal_call_runtime(
+ ctx: Runtime<E: Ext>,
+ call_ptr: u32,
+ call_len: u32,
+ ) -> Result<ReturnCode, TrapReason> {
 use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight};
 ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?;
- let call: <E::T as Config>::Call = ctx.read_sandbox_memory_as_unbounded(
- call_ptr, call_len
- )?;
+ let call: <E::T as Config>::Call =
+ ctx.read_sandbox_memory_as_unbounded(call_ptr, call_len)?;
 let dispatch_info = call.get_dispatch_info();
 let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?;
 let result = ctx.ext.call_runtime(call);
@@ -2107,27 +2315,31 @@ define_env!(Env, ,
 Ok(_) => Ok(ReturnCode::Success),
 Err(_) => Ok(ReturnCode::CallRuntimeReturnedError),
 }
- },
+ }
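// Illustrative only: a `Contains` filter of the kind a runtime can plug into
// `pallet_contracts::Config::CallFilter` to scope what `seal_call_runtime` may
// dispatch. `Call` is assumed to be the `construct_runtime!`-generated call enum.
use frame_support::traits::Contains;

pub struct AllowBalanceTransfer;
impl Contains<Call> for AllowBalanceTransfer {
	fn contains(call: &Call) -> bool {
		// Permit plain balance transfers; reject every other dispatchable.
		matches!(call, Call::Balances(pallet_balances::Call::transfer { .. }))
	}
}
// Wired up in the runtime as: `type CallFilter = AllowBalanceTransfer;`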
- // Recovers the ECDSA public key from the given message hash and signature.
- //
- // Writes the public key into the given output buffer.
- // Assumes the secp256k1 curve.
- //
- // # Parameters
- //
- // - `signature_ptr`: the pointer into the linear memory where the signature
- // is placed. Should be decodable as a 65 bytes. Traps otherwise.
- // - `message_hash_ptr`: the pointer into the linear memory where the message
- // hash is placed. Should be decodable as a 32 bytes. Traps otherwise.
- // - `output_ptr`: the pointer into the linear memory where the output
- // data is placed. The buffer should be 33 bytes. The function
- // will write the result directly into this buffer.
- //
- // # Errors
- //
- // `ReturnCode::EcdsaRecoverFailed`
- [seal0] seal_ecdsa_recover(ctx, signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32) -> ReturnCode => {
+ /// Recovers the ECDSA public key from the given message hash and signature.
+ ///
+ /// Writes the public key into the given output buffer.
+ /// Assumes the secp256k1 curve.
+ ///
+ /// # Parameters
+ ///
+ /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should
+ /// be decodable as 65 bytes. Traps otherwise.
+ /// - `message_hash_ptr`: the pointer into the linear memory where the message hash is placed.
+ /// Should be decodable as 32 bytes. Traps otherwise.
+ /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The
+ /// buffer should be 33 bytes. The function will write the result directly into this buffer.
+ ///
+ /// # Errors
+ ///
+ /// `ReturnCode::EcdsaRecoverFailed`
+ fn seal_ecdsa_recover(
+ ctx: Runtime<E: Ext>,
+ signature_ptr: u32,
+ message_hash_ptr: u32,
+ output_ptr: u32,
+ ) -> Result<ReturnCode, TrapReason> {
 ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?;

 let mut signature: [u8; 65] = [0; 65];
@@ -2147,65 +2359,72 @@ define_env!(Env, ,
 },
 Err(_) => Ok(ReturnCode::EcdsaRecoverFailed),
 }
- },
+ }
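// For reference, the same recovery outside the contracts sandbox; this is what
// pins down the 65/32/33-byte sizes documented above. Sketch only: the host
// function additionally charges weight and copies across sandbox memory.
use sp_io::crypto::secp256k1_ecdsa_recover_compressed;

fn recover(signature: &[u8; 65], message_hash: &[u8; 32]) -> Option<[u8; 33]> {
	// On success this yields the compressed secp256k1 public key (33 bytes).
	secp256k1_ecdsa_recover_compressed(signature, message_hash).ok()
}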
- // Replace the contract code at the specified address with new code.
- //
- // # Note
- //
- // There are a couple of important considerations which must be taken into account when
- // using this API:
- //
- // 1. The storage at the code address will remain untouched. This means that contract developers
- // must ensure that the storage layout of the new code is compatible with that of the old code.
- //
- // 2. Contracts using this API can't be assumed as having deterministic addresses. Said another way,
- // when using this API you lose the guarantee that an address always identifies a specific code hash.
- //
- // 3. If a contract calls into itself after changing its code the new call would use
- // the new code. However, if the original caller panics after returning from the sub call it
- // would revert the changes made by `seal_set_code_hash` and the next caller would use
- // the old code.
- //
- // # Parameters
- //
- // - `code_hash_ptr`: A pointer to the buffer that contains the new code hash.
- //
- // # Errors
- //
- // `ReturnCode::CodeNotFound`
- [seal0] seal_set_code_hash(ctx, code_hash_ptr: u32) -> ReturnCode => {
+ /// Replace the contract code at the specified address with new code.
+ ///
+ /// # Note
+ ///
+ /// There are a couple of important considerations which must be taken into account when
+ /// using this API:
+ ///
+ /// 1. The storage at the code address will remain untouched. This means that contract
+ /// developers must ensure that the storage layout of the new code is compatible with that of
+ /// the old code.
+ ///
+ /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another
+ /// way, when using this API you lose the guarantee that an address always identifies a specific
+ /// code hash.
+ ///
+ /// 3. If a contract calls into itself after changing its code, the new call would use
+ /// the new code. However, if the original caller panics after returning from the sub call, it
+ /// would revert the changes made by `seal_set_code_hash` and the next caller would use
+ /// the old code.
+ ///
+ /// # Parameters
+ ///
+ /// - `code_hash_ptr`: A pointer to the buffer that contains the new code hash.
+ ///
+ /// # Errors
+ ///
+ /// `ReturnCode::CodeNotFound`
+ fn seal_set_code_hash(
+ ctx: Runtime<E: Ext>,
+ code_hash_ptr: u32,
+ ) -> Result<ReturnCode, TrapReason> {
 ctx.charge_gas(RuntimeCosts::SetCodeHash)?;
 let code_hash: CodeHash<<E as Ext>::T> = ctx.read_sandbox_memory_as(code_hash_ptr)?;
 match ctx.ext.set_code_hash(code_hash) {
- Err(err) => {
+ Err(err) => {
 let code = Runtime::<E>::err_into_return_code(err)?;
 Ok(code)
 },
- Ok(()) => Ok(ReturnCode::Success)
+ Ok(()) => Ok(ReturnCode::Success),
 }
- },
+ }

- // Calculates Ethereum address from the ECDSA compressed public key and stores
- // it into the supplied buffer.
- //
- // # Parameters
- //
- // - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes value.
- // Traps otherwise.
- // - `out_ptr`: the pointer into the linear memory where the output
- // data is placed. The function will write the result
- // directly into this buffer.
- //
- // The value is stored to linear memory at the address pointed to by `out_ptr`.
- // If the available space at `out_ptr` is less than the size of the value a trap is triggered.
- //
- // # Errors
- //
- // `ReturnCode::EcdsaRecoverFailed`
- [seal0] seal_ecdsa_to_eth_address(ctx, key_ptr: u32, out_ptr: u32) -> ReturnCode => {
+ /// Calculates the Ethereum address from the ECDSA compressed public key and stores
+ /// it into the supplied buffer.
+ ///
+ /// # Parameters
+ ///
+ /// - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33-byte
+ /// value. Traps otherwise.
+ /// - `out_ptr`: the pointer into the linear memory where the output data is placed. The
+ /// function will write the result directly into this buffer.
+ ///
+ /// The value is stored to linear memory at the address pointed to by `out_ptr`.
+ /// If the available space at `out_ptr` is less than the size of the value, a trap is triggered.
+ ///
+ /// # Errors
+ ///
+ /// `ReturnCode::EcdsaRecoverFailed`
+ fn seal_ecdsa_to_eth_address(
+ ctx: Runtime<E: Ext>,
+ key_ptr: u32,
+ out_ptr: u32,
+ ) -> Result<ReturnCode, TrapReason> {
 ctx.charge_gas(RuntimeCosts::EcdsaToEthAddress)?;
- let mut compressed_key: [u8; 33] = [0;33];
+ let mut compressed_key: [u8; 33] = [0; 33];
 ctx.read_sandbox_memory_into_buf(key_ptr, &mut compressed_key)?;
 let result = ctx.ext.ecdsa_to_eth_address(&compressed_key);
 match result {
@@ -2215,5 +2434,5 @@ define_env!(Env, ,
 },
 Err(_) => Ok(ReturnCode::EcdsaRecoverFailed),
 }
- },
-);
+ }
+}
diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs
index 3c90579e65d53..17be3c3da0138 100644
--- a/frame/contracts/src/weights.rs
+++ b/frame/contracts/src/weights.rs
@@ -41,7 +41,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;

 /// Weight functions needed for pallet_contracts.
@@ -166,37 +166,37 @@ pub struct SubstrateWeight(PhantomData);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 // Storage: Contracts DeletionQueue (r:1 w:0)
 fn on_process_deletion_queue_batch() -> Weight {
- (1_654_000 as Weight)
- .saturating_add(T::DbWeight::get().reads(1 as Weight))
+ Weight::from_ref_time(1_654_000 as RefTimeWeight)
+ .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
 }
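// The conversion pattern applied to every function in this file, shown once in
// isolation (sketch only; mirrors the `on_process_deletion_queue_batch` hunk above):
use frame_support::{traits::Get, weights::{RefTimeWeight, Weight}};

fn example_weight<T: frame_system::Config>() -> Weight {
	// Old form: `(1_654_000 as Weight).saturating_add(T::DbWeight::get().reads(1 as Weight))`,
	// where `Weight` was a bare integer. The new form constructs the reference-time
	// part explicitly; benchmark components are folded in via `scalar_saturating_mul`.
	Weight::from_ref_time(1_654_000 as RefTimeWeight)
		.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
}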
 // Storage: Skipped Metadata (r:0 w:0)
 /// The range of component `k` is `[0, 1024]`.
 fn on_initialize_per_trie_key(k: u32, ) -> Weight {
- (8_564_000 as Weight)
+ Weight::from_ref_time(8_564_000 as RefTimeWeight)
 // Standard Error: 0
- .saturating_add((868_000 as Weight).saturating_mul(k as Weight))
- .saturating_add(T::DbWeight::get().reads(1 as Weight))
- .saturating_add(T::DbWeight::get().writes(1 as Weight))
- .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight)))
+ .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight)))
 }
 // Storage: Contracts DeletionQueue (r:1 w:0)
 /// The range of component `q` is `[0, 1024]`.
 fn on_initialize_per_queue_item(q: u32, ) -> Weight {
- (0 as Weight)
+ Weight::from_ref_time(0 as RefTimeWeight)
 // Standard Error: 5_000
- .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight))
- .saturating_add(T::DbWeight::get().reads(1 as Weight))
- .saturating_add(T::DbWeight::get().writes(1 as Weight))
+ .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 }
 // Storage: Contracts PristineCode (r:1 w:0)
 // Storage: Contracts CodeStorage (r:0 w:1)
 /// The range of component `c` is `[0, 64226]`.
 fn reinstrument(c: u32, ) -> Weight {
- (19_016_000 as Weight)
+ Weight::from_ref_time(19_016_000 as RefTimeWeight)
 // Standard Error: 0
- .saturating_add((49_000 as Weight).saturating_mul(c as Weight))
- .saturating_add(T::DbWeight::get().reads(1 as Weight))
- .saturating_add(T::DbWeight::get().writes(1 as Weight))
+ .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 }
 // Storage: Contracts ContractInfoOf (r:1 w:1)
 // Storage: Contracts CodeStorage (r:1 w:0)
@@ -204,11 +204,11 @@ impl WeightInfo for SubstrateWeight {
 // Storage: System Account (r:1 w:1)
 /// The range of component `c` is `[0, 131072]`.
 fn call_with_code_per_byte(c: u32, ) -> Weight {
- (205_194_000 as Weight)
+ Weight::from_ref_time(205_194_000 as RefTimeWeight)
 // Standard Error: 0
- .saturating_add((53_000 as Weight).saturating_mul(c as Weight))
- .saturating_add(T::DbWeight::get().reads(4 as Weight))
- .saturating_add(T::DbWeight::get().writes(2 as Weight))
+ .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+ .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 }
 // Storage: Contracts CodeStorage (r:1 w:1)
 // Storage: Contracts Nonce (r:1 w:1)
@@ -220,13 +220,13 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `c` is `[0, 64226]`.
 /// The range of component `s` is `[0, 1048576]`.
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (288_487_000 as Weight) + Weight::from_ref_time(288_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -236,46 +236,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - (186_136_000 as Weight) + Weight::from_ref_time(186_136_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (149_232_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(149_232_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - (51_721_000 as Weight) + Weight::from_ref_time(51_721_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - (30_016_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(30_016_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - (27_192_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(27_192_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -283,11 +283,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - (206_405_000 as Weight) + Weight::from_ref_time(206_405_000 as RefTimeWeight) // Standard Error: 112_000 - .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -295,12 +295,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - (106_220_000 as Weight) + Weight::from_ref_time(106_220_000 as RefTimeWeight) // Standard Error: 710_000 - .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -308,12 +308,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - (104_498_000 as Weight) + Weight::from_ref_time(104_498_000 as RefTimeWeight) // Standard Error: 633_000 - .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -321,11 +321,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - (208_696_000 as Weight) + Weight::from_ref_time(208_696_000 as RefTimeWeight) // Standard Error: 101_000 - .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -333,11 +333,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - (205_612_000 as Weight) + Weight::from_ref_time(205_612_000 as RefTimeWeight) // Standard Error: 68_000 - .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -345,11 +345,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - (206_947_000 as Weight) + Weight::from_ref_time(206_947_000 as RefTimeWeight) // Standard Error: 107_000 - .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -357,11 +357,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - (208_692_000 as Weight) + Weight::from_ref_time(208_692_000 as RefTimeWeight) // Standard Error: 109_000 - .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -369,11 +369,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - (209_811_000 as Weight) + Weight::from_ref_time(209_811_000 as RefTimeWeight) // Standard Error: 208_000 - .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -381,11 +381,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - (207_406_000 as Weight) + Weight::from_ref_time(207_406_000 as RefTimeWeight) // Standard Error: 117_000 - .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -393,11 +393,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - (209_260_000 as Weight) + Weight::from_ref_time(209_260_000 as RefTimeWeight) // Standard Error: 130_000 - .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -405,11 +405,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - (206_448_000 as Weight) + Weight::from_ref_time(206_448_000 as RefTimeWeight) // Standard Error: 95_000 - .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -417,11 +417,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - (206_969_000 as Weight) + Weight::from_ref_time(206_969_000 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -430,11 +430,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - (211_611_000 as Weight) + Weight::from_ref_time(211_611_000 as RefTimeWeight) // Standard Error: 175_000 - .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -442,11 +442,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - (134_484_000 as Weight) + Weight::from_ref_time(134_484_000 as RefTimeWeight) // Standard Error: 57_000 - .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -454,11 +454,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_input(r: u32, ) -> Weight { - (208_556_000 as Weight) + Weight::from_ref_time(208_556_000 as RefTimeWeight) // Standard Error: 125_000 - .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -466,11 +466,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - (268_886_000 as Weight) + Weight::from_ref_time(268_886_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -478,9 +478,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 1]`. fn seal_return(_r: u32, ) -> Weight { - (203_591_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(203_591_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -488,11 +488,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - (204_258_000 as Weight) + Weight::from_ref_time(204_258_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -502,13 +502,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - (206_625_000 as Weight) + Weight::from_ref_time(206_625_000 as RefTimeWeight) // Standard Error: 672_000 - .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -517,11 +517,11 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - (208_866_000 as Weight) + Weight::from_ref_time(208_866_000 as RefTimeWeight) // Standard Error: 164_000 - .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -529,11 +529,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - (220_860_000 as Weight) + Weight::from_ref_time(220_860_000 as RefTimeWeight) // Standard Error: 209_000 - .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -543,15 +543,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (439_782_000 as Weight) + Weight::from_ref_time(439_782_000 as RefTimeWeight) // Standard Error: 1_643_000 - .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 323_000 - .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -559,128 +559,128 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - (140_280_000 as Weight) + Weight::from_ref_time(140_280_000 as RefTimeWeight) // Standard Error: 82_000 - .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - (161_247_000 as Weight) + Weight::from_ref_time(161_247_000 as RefTimeWeight) // Standard Error: 883_000 - .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - (529_247_000 as Weight) + Weight::from_ref_time(529_247_000 as RefTimeWeight) // Standard Error: 2_745_000 - .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - (529_812_000 as Weight) + Weight::from_ref_time(529_812_000 as RefTimeWeight) // Standard Error: 2_513_000 - .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - (184_803_000 as Weight) + Weight::from_ref_time(184_803_000 as RefTimeWeight) // Standard Error: 733_000 - .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - (500_958_000 as Weight) + Weight::from_ref_time(500_958_000 as RefTimeWeight) // Standard Error: 2_980_000 - .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(52 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(52 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - (177_682_000 as Weight) + Weight::from_ref_time(177_682_000 as RefTimeWeight) // Standard Error: 743_000 - .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (465_285_000 as Weight) + Weight::from_ref_time(465_285_000 as RefTimeWeight) // Standard Error: 2_599_000 - .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_contains_storage(r: u32, ) -> Weight { - (179_118_000 as Weight) + Weight::from_ref_time(179_118_000 as RefTimeWeight) // Standard Error: 572_000 - .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - (423_056_000 as Weight) + Weight::from_ref_time(423_056_000 as RefTimeWeight) // Standard Error: 2_037_000 - .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(54 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(54 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - (188_884_000 as Weight) + Weight::from_ref_time(188_884_000 as RefTimeWeight) // Standard Error: 761_000 - .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_take_storage_per_kb(n: u32, ) -> Weight { - (532_408_000 as Weight) + Weight::from_ref_time(532_408_000 as RefTimeWeight) // Standard Error: 3_348_000 - .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -688,13 +688,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - (127_181_000 as Weight) + Weight::from_ref_time(127_181_000 as RefTimeWeight) // Standard Error: 1_495_000 - .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -702,13 +702,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_803_000 - .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -716,11 +716,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
 	fn seal_delegate_call(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 			// Standard Error: 6_045_000
-			.saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:81 w:81)
@@ -729,15 +729,15 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `c` is `[0, 1024]`.
 	fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight {
-		(9_196_444_000 as Weight)
+		Weight::from_ref_time(9_196_444_000 as RefTimeWeight)
 			// Standard Error: 20_486_000
-			.saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight))
+			.saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
 			// Standard Error: 8_000
-			.saturating_add((9_718_000 as Weight).saturating_mul(c as Weight))
-			.saturating_add(T::DbWeight::get().reads(85 as Weight))
-			.saturating_add(T::DbWeight::get().reads((81 as Weight).saturating_mul(t as Weight)))
-			.saturating_add(T::DbWeight::get().writes(81 as Weight))
-			.saturating_add(T::DbWeight::get().writes((81 as Weight).saturating_mul(t as Weight)))
+			.saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(85 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(81 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -747,13 +747,13 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Contracts OwnerInfoOf (r:80 w:80)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_instantiate(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 			// Standard Error: 36_253_000
-			.saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(6 as Weight))
-			.saturating_add(T::DbWeight::get().reads((320 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes((320 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: System Account (r:81 w:81)
 	// Storage: Contracts ContractInfoOf (r:81 w:81)
@@ -764,15 +764,15 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `s` is `[0, 960]`.
 	fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight {
-		(12_282_498_000 as Weight)
+		Weight::from_ref_time(12_282_498_000 as RefTimeWeight)
 			// Standard Error: 48_112_000
-			.saturating_add((720_795_000 as Weight).saturating_mul(t as Weight))
+			.saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
 			// Standard Error: 22_000
-			.saturating_add((124_274_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(167 as Weight))
-			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(t as Weight)))
-			.saturating_add(T::DbWeight::get().writes(165 as Weight))
-			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(t as Weight)))
+			.saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(167 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(165 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -780,11 +780,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_sha2_256(r: u32, ) -> Weight {
-		(203_959_000 as Weight)
+		Weight::from_ref_time(203_959_000 as RefTimeWeight)
 			// Standard Error: 142_000
-			.saturating_add((61_311_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -792,11 +792,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight {
-		(349_915_000 as Weight)
+		Weight::from_ref_time(349_915_000 as RefTimeWeight)
 			// Standard Error: 40_000
-			.saturating_add((320_652_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -804,11 +804,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_keccak_256(r: u32, ) -> Weight {
-		(209_219_000 as Weight)
+		Weight::from_ref_time(209_219_000 as RefTimeWeight)
 			// Standard Error: 157_000
-			.saturating_add((73_728_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -816,11 +816,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight {
-		(208_860_000 as Weight)
+		Weight::from_ref_time(208_860_000 as RefTimeWeight)
 			// Standard Error: 25_000
-			.saturating_add((245_718_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -828,11 +828,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_blake2_256(r: u32, ) -> Weight {
-		(206_165_000 as Weight)
+		Weight::from_ref_time(206_165_000 as RefTimeWeight)
 			// Standard Error: 138_000
-			.saturating_add((51_644_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -840,11 +840,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight {
-		(255_955_000 as Weight)
+		Weight::from_ref_time(255_955_000 as RefTimeWeight)
 			// Standard Error: 14_000
-			.saturating_add((95_090_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -852,11 +852,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_blake2_128(r: u32, ) -> Weight {
-		(208_153_000 as Weight)
+		Weight::from_ref_time(208_153_000 as RefTimeWeight)
 			// Standard Error: 140_000
-			.saturating_add((51_264_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -864,11 +864,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight {
-		(278_368_000 as Weight)
+		Weight::from_ref_time(278_368_000 as RefTimeWeight)
 			// Standard Error: 14_000
-			.saturating_add((95_006_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -876,11 +876,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_ecdsa_recover(r: u32, ) -> Weight {
-		(331_955_000 as Weight)
+		Weight::from_ref_time(331_955_000 as RefTimeWeight)
 			// Standard Error: 1_155_000
-			.saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -888,11 +888,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight {
-		(207_838_000 as Weight)
+		Weight::from_ref_time(207_838_000 as RefTimeWeight)
 			// Standard Error: 783_000
-			.saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -901,317 +901,317 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Contracts OwnerInfoOf (r:16 w:16)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_set_code_hash(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 			// Standard Error: 1_567_000
-			.saturating_add((774_380_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(T::DbWeight::get().writes((79 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64const(r: u32, ) -> Weight {
-		(73_955_000 as Weight)
+		Weight::from_ref_time(73_955_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((612_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64load(r: u32, ) -> Weight {
-		(74_057_000 as Weight)
+		Weight::from_ref_time(74_057_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((1_324_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64store(r: u32, ) -> Weight {
-		(74_137_000 as Weight)
+		Weight::from_ref_time(74_137_000 as RefTimeWeight)
 			// Standard Error: 5_000
-			.saturating_add((1_427_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_select(r: u32, ) -> Weight {
-		(73_844_000 as Weight)
+		Weight::from_ref_time(73_844_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_773_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_if(r: u32, ) -> Weight {
-		(73_979_000 as Weight)
+		Weight::from_ref_time(73_979_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((1_952_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br(r: u32, ) -> Weight {
-		(73_924_000 as Weight)
+		Weight::from_ref_time(73_924_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((941_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br_if(r: u32, ) -> Weight {
-		(73_574_000 as Weight)
+		Weight::from_ref_time(73_574_000 as RefTimeWeight)
 			// Standard Error: 5_000
-			.saturating_add((1_439_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br_table(r: u32, ) -> Weight {
-		(73_343_000 as Weight)
+		Weight::from_ref_time(73_343_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((1_603_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `e` is `[1, 256]`.
 	fn instr_br_table_per_entry(e: u32, ) -> Weight {
-		(76_267_000 as Weight)
+		Weight::from_ref_time(76_267_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((4_000 as Weight).saturating_mul(e as Weight))
+			.saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_call(r: u32, ) -> Weight {
-		(74_877_000 as Weight)
+		Weight::from_ref_time(74_877_000 as RefTimeWeight)
 			// Standard Error: 12_000
-			.saturating_add((7_144_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_call_indirect(r: u32, ) -> Weight {
-		(88_665_000 as Weight)
+		Weight::from_ref_time(88_665_000 as RefTimeWeight)
 			// Standard Error: 20_000
-			.saturating_add((9_142_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `p` is `[0, 128]`.
 	fn instr_call_indirect_per_param(p: u32, ) -> Weight {
-		(98_600_000 as Weight)
+		Weight::from_ref_time(98_600_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((469_000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_get(r: u32, ) -> Weight {
-		(74_555_000 as Weight)
+		Weight::from_ref_time(74_555_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((624_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_set(r: u32, ) -> Weight {
-		(74_329_000 as Weight)
+		Weight::from_ref_time(74_329_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((688_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_tee(r: u32, ) -> Weight {
-		(74_612_000 as Weight)
+		Weight::from_ref_time(74_612_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((909_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_global_get(r: u32, ) -> Weight {
-		(76_906_000 as Weight)
+		Weight::from_ref_time(76_906_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_192_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_global_set(r: u32, ) -> Weight {
-		(76_979_000 as Weight)
+		Weight::from_ref_time(76_979_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_361_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_memory_current(r: u32, ) -> Weight {
-		(74_370_000 as Weight)
+		Weight::from_ref_time(74_370_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((661_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 1]`.
 	fn instr_memory_grow(r: u32, ) -> Weight {
-		(73_584_000 as Weight)
+		Weight::from_ref_time(73_584_000 as RefTimeWeight)
 			// Standard Error: 353_000
-			.saturating_add((187_114_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64clz(r: u32, ) -> Weight {
-		(74_206_000 as Weight)
+		Weight::from_ref_time(74_206_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((884_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ctz(r: u32, ) -> Weight {
-		(73_992_000 as Weight)
+		Weight::from_ref_time(73_992_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((893_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64popcnt(r: u32, ) -> Weight {
-		(73_985_000 as Weight)
+		Weight::from_ref_time(73_985_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((891_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64eqz(r: u32, ) -> Weight {
-		(74_117_000 as Weight)
+		Weight::from_ref_time(74_117_000 as RefTimeWeight)
 			// Standard Error: 4_000
-			.saturating_add((901_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64extendsi32(r: u32, ) -> Weight {
-		(73_981_000 as Weight)
+		Weight::from_ref_time(73_981_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((866_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64extendui32(r: u32, ) -> Weight {
-		(74_104_000 as Weight)
+		Weight::from_ref_time(74_104_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((868_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i32wrapi64(r: u32, ) -> Weight {
-		(74_293_000 as Weight)
+		Weight::from_ref_time(74_293_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((878_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64eq(r: u32, ) -> Weight {
-		(74_055_000 as Weight)
+		Weight::from_ref_time(74_055_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_350_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ne(r: u32, ) -> Weight {
-		(73_710_000 as Weight)
+		Weight::from_ref_time(73_710_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64lts(r: u32, ) -> Weight {
-		(73_917_000 as Weight)
+		Weight::from_ref_time(73_917_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_355_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ltu(r: u32, ) -> Weight {
-		(74_048_000 as Weight)
+		Weight::from_ref_time(74_048_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64gts(r: u32, ) -> Weight {
-		(74_029_000 as Weight)
+		Weight::from_ref_time(74_029_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_349_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64gtu(r: u32, ) -> Weight {
-		(74_267_000 as Weight)
+		Weight::from_ref_time(74_267_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_353_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64les(r: u32, ) -> Weight {
-		(73_952_000 as Weight)
+		Weight::from_ref_time(73_952_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_350_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64leu(r: u32, ) -> Weight {
-		(73_851_000 as Weight)
+		Weight::from_ref_time(73_851_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_368_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ges(r: u32, ) -> Weight {
-		(74_034_000 as Weight)
+		Weight::from_ref_time(74_034_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_348_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64geu(r: u32, ) -> Weight {
-		(73_979_000 as Weight)
+		Weight::from_ref_time(73_979_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((1_353_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64add(r: u32, ) -> Weight {
-		(74_000_000 as Weight)
+		Weight::from_ref_time(74_000_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_328_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64sub(r: u32, ) -> Weight {
-		(73_883_000 as Weight)
+		Weight::from_ref_time(73_883_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_331_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64mul(r: u32, ) -> Weight {
-		(74_216_000 as Weight)
+		Weight::from_ref_time(74_216_000 as RefTimeWeight)
 			// Standard Error: 5_000
-			.saturating_add((1_324_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64divs(r: u32, ) -> Weight {
-		(73_989_000 as Weight)
+		Weight::from_ref_time(73_989_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_998_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64divu(r: u32, ) -> Weight {
-		(73_857_000 as Weight)
+		Weight::from_ref_time(73_857_000 as RefTimeWeight)
 			// Standard Error: 4_000
-			.saturating_add((2_073_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rems(r: u32, ) -> Weight {
-		(73_801_000 as Weight)
+		Weight::from_ref_time(73_801_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((2_027_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64remu(r: u32, ) -> Weight {
-		(74_130_000 as Weight)
+		Weight::from_ref_time(74_130_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((2_064_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64and(r: u32, ) -> Weight {
-		(74_071_000 as Weight)
+		Weight::from_ref_time(74_071_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_327_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64or(r: u32, ) -> Weight {
-		(74_201_000 as Weight)
+		Weight::from_ref_time(74_201_000 as RefTimeWeight)
 			// Standard Error: 4_000
-			.saturating_add((1_330_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64xor(r: u32, ) -> Weight {
-		(74_241_000 as Weight)
+		Weight::from_ref_time(74_241_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_321_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shl(r: u32, ) -> Weight {
-		(74_331_000 as Weight)
+		Weight::from_ref_time(74_331_000 as RefTimeWeight)
 			// Standard Error: 6_000
-			.saturating_add((1_347_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shrs(r: u32, ) -> Weight {
-		(73_674_000 as Weight)
+		Weight::from_ref_time(73_674_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_359_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shru(r: u32, ) -> Weight {
-		(73_807_000 as Weight)
+		Weight::from_ref_time(73_807_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((1_358_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rotl(r: u32, ) -> Weight {
-		(73_725_000 as Weight)
+		Weight::from_ref_time(73_725_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((1_358_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rotr(r: u32, ) -> Weight {
-		(73_755_000 as Weight)
+		Weight::from_ref_time(73_755_000 as RefTimeWeight)
 			// Standard Error: 3_000
-			.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 }
@@ -1219,37 +1219,37 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 impl WeightInfo for () {
 	// Storage: Contracts DeletionQueue (r:1 w:0)
 	fn on_process_deletion_queue_batch() -> Weight {
-		(1_654_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+		Weight::from_ref_time(1_654_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `k` is `[0, 1024]`.
 	fn on_initialize_per_trie_key(k: u32, ) -> Weight {
-		(8_564_000 as Weight)
+		Weight::from_ref_time(8_564_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((868_000 as Weight).saturating_mul(k as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight)))
+			.saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight)))
 	}
 	// Storage: Contracts DeletionQueue (r:1 w:0)
 	/// The range of component `q` is `[0, 1024]`.
 	fn on_initialize_per_queue_item(q: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 			// Standard Error: 5_000
-			.saturating_add((1_944_000 as Weight).saturating_mul(q as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Contracts PristineCode (r:1 w:0)
 	// Storage: Contracts CodeStorage (r:0 w:1)
 	/// The range of component `c` is `[0, 64226]`.
 	fn reinstrument(c: u32, ) -> Weight {
-		(19_016_000 as Weight)
+		Weight::from_ref_time(19_016_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((49_000 as Weight).saturating_mul(c as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
 	// Storage: Contracts CodeStorage (r:1 w:0)
@@ -1257,11 +1257,11 @@ impl WeightInfo for () {
 	// Storage: System Account (r:1 w:1)
 	/// The range of component `c` is `[0, 131072]`.
 	fn call_with_code_per_byte(c: u32, ) -> Weight {
-		(205_194_000 as Weight)
+		Weight::from_ref_time(205_194_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((53_000 as Weight).saturating_mul(c as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Contracts CodeStorage (r:1 w:1)
 	// Storage: Contracts Nonce (r:1 w:1)
@@ -1273,13 +1273,13 @@ impl WeightInfo for () {
 	/// The range of component `c` is `[0, 64226]`.
 	/// The range of component `s` is `[0, 1048576]`.
 	fn instantiate_with_code(c: u32, s: u32, ) -> Weight {
-		(288_487_000 as Weight)
+		Weight::from_ref_time(288_487_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((124_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(6 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight))
 	}
 	// Storage: Contracts CodeStorage (r:1 w:1)
 	// Storage: Contracts Nonce (r:1 w:1)
@@ -1289,46 +1289,46 @@ impl WeightInfo for () {
 	// Storage: Contracts OwnerInfoOf (r:1 w:1)
 	/// The range of component `s` is `[0, 1048576]`.
 	fn instantiate(s: u32, ) -> Weight {
-		(186_136_000 as Weight)
+		Weight::from_ref_time(186_136_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(6 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(5 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight))
 	}
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
 	// Storage: Contracts CodeStorage (r:1 w:0)
 	// Storage: Timestamp Now (r:1 w:0)
 	// Storage: System Account (r:1 w:1)
 	fn call() -> Weight {
-		(149_232_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(149_232_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Contracts CodeStorage (r:1 w:1)
 	// Storage: Contracts PristineCode (r:0 w:1)
 	// Storage: Contracts OwnerInfoOf (r:0 w:1)
 	/// The range of component `c` is `[0, 64226]`.
 	fn upload_code(c: u32, ) -> Weight {
-		(51_721_000 as Weight)
+		Weight::from_ref_time(51_721_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((48_000 as Weight).saturating_mul(c as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Contracts OwnerInfoOf (r:1 w:1)
 	// Storage: Contracts CodeStorage (r:0 w:1)
 	// Storage: Contracts PristineCode (r:0 w:1)
 	fn remove_code() -> Weight {
-		(30_016_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(30_016_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
 	// Storage: Contracts OwnerInfoOf (r:2 w:2)
 	fn set_code() -> Weight {
-		(27_192_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(27_192_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1336,11 +1336,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_caller(r: u32, ) -> Weight {
-		(206_405_000 as Weight)
+		Weight::from_ref_time(206_405_000 as RefTimeWeight)
 			// Standard Error: 112_000
-			.saturating_add((40_987_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1348,12 +1348,12 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_is_contract(r: u32, ) -> Weight {
-		(106_220_000 as Weight)
+		Weight::from_ref_time(106_220_000 as RefTimeWeight)
 			// Standard Error: 710_000
-			.saturating_add((307_648_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1361,12 +1361,12 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_code_hash(r: u32, ) -> Weight {
-		(104_498_000 as Weight)
+		Weight::from_ref_time(104_498_000 as RefTimeWeight)
 			// Standard Error: 633_000
-			.saturating_add((368_901_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1374,11 +1374,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_own_code_hash(r: u32, ) -> Weight {
-		(208_696_000 as Weight)
+		Weight::from_ref_time(208_696_000 as RefTimeWeight)
 			// Standard Error: 101_000
-			.saturating_add((44_445_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1386,11 +1386,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_caller_is_origin(r: u32, ) -> Weight {
-		(205_612_000 as Weight)
+		Weight::from_ref_time(205_612_000 as RefTimeWeight)
 			// Standard Error: 68_000
-			.saturating_add((17_145_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1398,11 +1398,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_address(r: u32, ) -> Weight {
-		(206_947_000 as Weight)
+		Weight::from_ref_time(206_947_000 as RefTimeWeight)
 			// Standard Error: 107_000
-			.saturating_add((40_789_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1410,11 +1410,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_gas_left(r: u32, ) -> Weight {
-		(208_692_000 as Weight)
+		Weight::from_ref_time(208_692_000 as RefTimeWeight)
 			// Standard Error: 109_000
-			.saturating_add((40_600_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1422,11 +1422,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_balance(r: u32, ) -> Weight {
-		(209_811_000 as Weight)
+		Weight::from_ref_time(209_811_000 as RefTimeWeight)
 			// Standard Error: 208_000
-			.saturating_add((116_831_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1434,11 +1434,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_value_transferred(r: u32, ) -> Weight {
-		(207_406_000 as Weight)
+		Weight::from_ref_time(207_406_000 as RefTimeWeight)
 			// Standard Error: 117_000
-			.saturating_add((40_702_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1446,11 +1446,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_minimum_balance(r: u32, ) -> Weight {
-		(209_260_000 as Weight)
+		Weight::from_ref_time(209_260_000 as RefTimeWeight)
 			// Standard Error: 130_000
-			.saturating_add((40_479_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1458,11 +1458,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_block_number(r: u32, ) -> Weight {
-		(206_448_000 as Weight)
+		Weight::from_ref_time(206_448_000 as RefTimeWeight)
 			// Standard Error: 95_000
-			.saturating_add((40_134_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1470,11 +1470,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_now(r: u32, ) -> Weight {
-		(206_969_000 as Weight)
+		Weight::from_ref_time(206_969_000 as RefTimeWeight)
 			// Standard Error: 116_000
-			.saturating_add((40_251_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1483,11 +1483,11 @@ impl WeightInfo for () {
 	// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_weight_to_fee(r: u32, ) -> Weight {
-		(211_611_000 as Weight)
+		Weight::from_ref_time(211_611_000 as RefTimeWeight)
 			// Standard Error: 175_000
-			.saturating_add((98_675_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1495,11 +1495,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_gas(r: u32, ) -> Weight {
-		(134_484_000 as Weight)
+		Weight::from_ref_time(134_484_000 as RefTimeWeight)
 			// Standard Error: 57_000
-			.saturating_add((19_329_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1507,11 +1507,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_input(r: u32, ) -> Weight {
-		(208_556_000 as Weight)
+		Weight::from_ref_time(208_556_000 as RefTimeWeight)
 			// Standard Error: 125_000
-			.saturating_add((40_328_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1519,11 +1519,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_input_per_kb(n: u32, ) -> Weight {
-		(268_886_000 as Weight)
+		Weight::from_ref_time(268_886_000 as RefTimeWeight)
 			// Standard Error: 4_000
-			.saturating_add((9_627_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1531,9 +1531,9 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 1]`.
 	fn seal_return(_r: u32, ) -> Weight {
-		(203_591_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(203_591_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1541,11 +1541,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_return_per_kb(n: u32, ) -> Weight {
-		(204_258_000 as Weight)
+		Weight::from_ref_time(204_258_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((183_000 as Weight).saturating_mul(n as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1555,13 +1555,13 @@ impl WeightInfo for () {
 	// Storage: Contracts OwnerInfoOf (r:1 w:1)
 	/// The range of component `r` is `[0, 1]`.
 	fn seal_terminate(r: u32, ) -> Weight {
-		(206_625_000 as Weight)
+		Weight::from_ref_time(206_625_000 as RefTimeWeight)
 			// Standard Error: 672_000
-			.saturating_add((59_377_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1570,11 +1570,11 @@ impl WeightInfo for () {
 	// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_random(r: u32, ) -> Weight {
-		(208_866_000 as Weight)
+		Weight::from_ref_time(208_866_000 as RefTimeWeight)
 			// Standard Error: 164_000
-			.saturating_add((133_438_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1582,11 +1582,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_deposit_event(r: u32, ) -> Weight {
-		(220_860_000 as Weight)
+		Weight::from_ref_time(220_860_000 as RefTimeWeight)
 			// Standard Error: 209_000
-			.saturating_add((239_951_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1596,15 +1596,15 @@ impl WeightInfo for () {
 	/// The range of component `t` is `[0, 4]`.
 	/// The range of component `n` is `[0, 16]`.
 	fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight {
-		(439_782_000 as Weight)
+		Weight::from_ref_time(439_782_000 as RefTimeWeight)
 		// Standard Error: 1_643_000
-		.saturating_add((264_687_000 as Weight).saturating_mul(t as Weight))
+		.saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
 		// Standard Error: 323_000
-		.saturating_add((67_636_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(t as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(t as Weight)))
+		.saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1612,128 +1612,128 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_debug_message(r: u32, ) -> Weight {
-		(140_280_000 as Weight)
+		Weight::from_ref_time(140_280_000 as RefTimeWeight)
 		// Standard Error: 82_000
-		.saturating_add((32_717_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `r` is `[0, 10]`.
 	fn seal_set_storage(r: u32, ) -> Weight {
-		(161_247_000 as Weight)
+		Weight::from_ref_time(161_247_000 as RefTimeWeight)
 		// Standard Error: 883_000
-		.saturating_add((423_997_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_set_storage_per_new_kb(n: u32, ) -> Weight {
-		(529_247_000 as Weight)
+		Weight::from_ref_time(529_247_000 as RefTimeWeight)
 		// Standard Error: 2_745_000
-		.saturating_add((85_282_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(55 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(53 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight)))
+		.saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_set_storage_per_old_kb(n: u32, ) -> Weight {
-		(529_812_000 as Weight)
+		Weight::from_ref_time(529_812_000 as RefTimeWeight)
 		// Standard Error: 2_513_000
-		.saturating_add((74_554_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(55 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(53 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight)))
+		.saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `r` is `[0, 10]`.
 	fn seal_clear_storage(r: u32, ) -> Weight {
-		(184_803_000 as Weight)
+		Weight::from_ref_time(184_803_000 as RefTimeWeight)
 		// Standard Error: 733_000
-		.saturating_add((404_933_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_clear_storage_per_kb(n: u32, ) -> Weight {
-		(500_958_000 as Weight)
+		Weight::from_ref_time(500_958_000 as RefTimeWeight)
 		// Standard Error: 2_980_000
-		.saturating_add((75_996_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(55 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(52 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight)))
+		.saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(52 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `r` is `[0, 10]`.
 	fn seal_get_storage(r: u32, ) -> Weight {
-		(177_682_000 as Weight)
+		Weight::from_ref_time(177_682_000 as RefTimeWeight)
 		// Standard Error: 743_000
-		.saturating_add((338_172_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_get_storage_per_kb(n: u32, ) -> Weight {
-		(465_285_000 as Weight)
+		Weight::from_ref_time(465_285_000 as RefTimeWeight)
 		// Standard Error: 2_599_000
-		.saturating_add((155_106_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(55 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `r` is `[0, 10]`.
 	fn seal_contains_storage(r: u32, ) -> Weight {
-		(179_118_000 as Weight)
+		Weight::from_ref_time(179_118_000 as RefTimeWeight)
 		// Standard Error: 572_000
-		.saturating_add((311_083_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_contains_storage_per_kb(n: u32, ) -> Weight {
-		(423_056_000 as Weight)
+		Weight::from_ref_time(423_056_000 as RefTimeWeight)
 		// Standard Error: 2_037_000
-		.saturating_add((69_665_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(54 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(54 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `r` is `[0, 10]`.
 	fn seal_take_storage(r: u32, ) -> Weight {
-		(188_884_000 as Weight)
+		Weight::from_ref_time(188_884_000 as RefTimeWeight)
 		// Standard Error: 761_000
-		.saturating_add((432_781_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `n` is `[0, 8]`.
 	fn seal_take_storage_per_kb(n: u32, ) -> Weight {
-		(532_408_000 as Weight)
+		Weight::from_ref_time(532_408_000 as RefTimeWeight)
 		// Standard Error: 3_348_000
-		.saturating_add((164_943_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(55 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(53 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight)))
+		.saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1741,13 +1741,13 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_transfer(r: u32, ) -> Weight {
-		(127_181_000 as Weight)
+		Weight::from_ref_time(127_181_000 as RefTimeWeight)
 		// Standard Error: 1_495_000
-		.saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1755,13 +1755,13 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_call(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 		// Standard Error: 3_803_000
-		.saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1769,11 +1769,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_delegate_call(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 		// Standard Error: 6_045_000
-		.saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:81 w:81)
@@ -1782,15 +1782,15 @@ impl WeightInfo for () {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `c` is `[0, 1024]`.
 	fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight {
-		(9_196_444_000 as Weight)
+		Weight::from_ref_time(9_196_444_000 as RefTimeWeight)
 		// Standard Error: 20_486_000
-		.saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight))
+		.saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
 		// Standard Error: 8_000
-		.saturating_add((9_718_000 as Weight).saturating_mul(c as Weight))
-		.saturating_add(RocksDbWeight::get().reads(85 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((81 as Weight).saturating_mul(t as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(81 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((81 as Weight).saturating_mul(t as Weight)))
+		.saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(85 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(81 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1800,13 +1800,13 @@ impl WeightInfo for () {
 	// Storage: Contracts OwnerInfoOf (r:80 w:80)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_instantiate(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 		// Standard Error: 36_253_000
-		.saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(6 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((320 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(3 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((320 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	// Storage: System Account (r:81 w:81)
 	// Storage: Contracts ContractInfoOf (r:81 w:81)
@@ -1817,15 +1817,15 @@ impl WeightInfo for () {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `s` is `[0, 960]`.
 	fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight {
-		(12_282_498_000 as Weight)
+		Weight::from_ref_time(12_282_498_000 as RefTimeWeight)
 		// Standard Error: 48_112_000
-		.saturating_add((720_795_000 as Weight).saturating_mul(t as Weight))
+		.saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
 		// Standard Error: 22_000
-		.saturating_add((124_274_000 as Weight).saturating_mul(s as Weight))
-		.saturating_add(RocksDbWeight::get().reads(167 as Weight))
-		.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(t as Weight)))
-		.saturating_add(RocksDbWeight::get().writes(165 as Weight))
-		.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(t as Weight)))
+		.saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(167 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes(165 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1833,11 +1833,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_sha2_256(r: u32, ) -> Weight {
-		(203_959_000 as Weight)
+		Weight::from_ref_time(203_959_000 as RefTimeWeight)
 		// Standard Error: 142_000
-		.saturating_add((61_311_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1845,11 +1845,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight {
-		(349_915_000 as Weight)
+		Weight::from_ref_time(349_915_000 as RefTimeWeight)
 		// Standard Error: 40_000
-		.saturating_add((320_652_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1857,11 +1857,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_keccak_256(r: u32, ) -> Weight {
-		(209_219_000 as Weight)
+		Weight::from_ref_time(209_219_000 as RefTimeWeight)
 		// Standard Error: 157_000
-		.saturating_add((73_728_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1869,11 +1869,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight {
-		(208_860_000 as Weight)
+		Weight::from_ref_time(208_860_000 as RefTimeWeight)
 		// Standard Error: 25_000
-		.saturating_add((245_718_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1881,11 +1881,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_blake2_256(r: u32, ) -> Weight {
-		(206_165_000 as Weight)
+		Weight::from_ref_time(206_165_000 as RefTimeWeight)
 		// Standard Error: 138_000
-		.saturating_add((51_644_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1893,11 +1893,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight {
-		(255_955_000 as Weight)
+		Weight::from_ref_time(255_955_000 as RefTimeWeight)
 		// Standard Error: 14_000
-		.saturating_add((95_090_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1905,11 +1905,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_blake2_128(r: u32, ) -> Weight {
-		(208_153_000 as Weight)
+		Weight::from_ref_time(208_153_000 as RefTimeWeight)
 		// Standard Error: 140_000
-		.saturating_add((51_264_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1917,11 +1917,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight {
-		(278_368_000 as Weight)
+		Weight::from_ref_time(278_368_000 as RefTimeWeight)
 		// Standard Error: 14_000
-		.saturating_add((95_006_000 as Weight).saturating_mul(n as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1929,11 +1929,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_ecdsa_recover(r: u32, ) -> Weight {
-		(331_955_000 as Weight)
+		Weight::from_ref_time(331_955_000 as RefTimeWeight)
 		// Standard Error: 1_155_000
-		.saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1941,11 +1941,11 @@ impl WeightInfo for () {
 	// Storage: Timestamp Now (r:1 w:0)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight {
-		(207_838_000 as Weight)
+		Weight::from_ref_time(207_838_000 as RefTimeWeight)
 		// Standard Error: 783_000
-		.saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		.saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: System Account (r:1 w:0)
 	// Storage: Contracts ContractInfoOf (r:1 w:1)
@@ -1954,316 +1954,316 @@ impl WeightInfo for () {
 	// Storage: Contracts OwnerInfoOf (r:16 w:16)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_set_code_hash(r: u32, ) -> Weight {
-		(0 as Weight)
+		Weight::from_ref_time(0 as RefTimeWeight)
 		// Standard Error: 1_567_000
-		.saturating_add((774_380_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight)))
-		.saturating_add(RocksDbWeight::get().writes((79 as Weight).saturating_mul(r as Weight)))
+		.saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+		.saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+		.saturating_add(RocksDbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64const(r: u32, ) -> Weight {
-		(73_955_000 as Weight)
+		Weight::from_ref_time(73_955_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((612_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64load(r: u32, ) -> Weight {
-		(74_057_000 as Weight)
+		Weight::from_ref_time(74_057_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((1_324_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64store(r: u32, ) -> Weight {
-		(74_137_000 as Weight)
+		Weight::from_ref_time(74_137_000 as RefTimeWeight)
 		// Standard Error: 5_000
-		.saturating_add((1_427_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_select(r: u32, ) -> Weight {
-		(73_844_000 as Weight)
+		Weight::from_ref_time(73_844_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_773_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_if(r: u32, ) -> Weight {
-		(73_979_000 as Weight)
+		Weight::from_ref_time(73_979_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((1_952_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
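// Aside: the `instr_*` functions above fit a linear `base + slope * r` model
// for each wasm instruction, and they implement the pallet's `WeightInfo`
// trait for `()` (the fallback impl this diff touches), so callers evaluate
// the model through the trait. A minimal sketch, assuming the conventional
// `pallet_contracts::weights` path for that trait:

use frame_support::weights::Weight;
use pallet_contracts::weights::WeightInfo;

// Weight to charge for `r` repetitions of `i64.const`; the generated function
// computes `73_955_000 + 612_000 * r` with saturating arithmetic.
fn i64const_cost(r: u32) -> Weight {
	<() as WeightInfo>::instr_i64const(r)
}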
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br(r: u32, ) -> Weight {
-		(73_924_000 as Weight)
+		Weight::from_ref_time(73_924_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((941_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br_if(r: u32, ) -> Weight {
-		(73_574_000 as Weight)
+		Weight::from_ref_time(73_574_000 as RefTimeWeight)
 		// Standard Error: 5_000
-		.saturating_add((1_439_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_br_table(r: u32, ) -> Weight {
-		(73_343_000 as Weight)
+		Weight::from_ref_time(73_343_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((1_603_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `e` is `[1, 256]`.
 	fn instr_br_table_per_entry(e: u32, ) -> Weight {
-		(76_267_000 as Weight)
+		Weight::from_ref_time(76_267_000 as RefTimeWeight)
 		// Standard Error: 0
-		.saturating_add((4_000 as Weight).saturating_mul(e as Weight))
+		.saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_call(r: u32, ) -> Weight {
-		(74_877_000 as Weight)
+		Weight::from_ref_time(74_877_000 as RefTimeWeight)
 		// Standard Error: 12_000
-		.saturating_add((7_144_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_call_indirect(r: u32, ) -> Weight {
-		(88_665_000 as Weight)
+		Weight::from_ref_time(88_665_000 as RefTimeWeight)
 		// Standard Error: 20_000
-		.saturating_add((9_142_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `p` is `[0, 128]`.
 	fn instr_call_indirect_per_param(p: u32, ) -> Weight {
-		(98_600_000 as Weight)
+		Weight::from_ref_time(98_600_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((469_000 as Weight).saturating_mul(p as Weight))
+		.saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_get(r: u32, ) -> Weight {
-		(74_555_000 as Weight)
+		Weight::from_ref_time(74_555_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((624_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_set(r: u32, ) -> Weight {
-		(74_329_000 as Weight)
+		Weight::from_ref_time(74_329_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((688_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_local_tee(r: u32, ) -> Weight {
-		(74_612_000 as Weight)
+		Weight::from_ref_time(74_612_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((909_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_global_get(r: u32, ) -> Weight {
-		(76_906_000 as Weight)
+		Weight::from_ref_time(76_906_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_192_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_global_set(r: u32, ) -> Weight {
-		(76_979_000 as Weight)
+		Weight::from_ref_time(76_979_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_361_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_memory_current(r: u32, ) -> Weight {
-		(74_370_000 as Weight)
+		Weight::from_ref_time(74_370_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((661_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 1]`.
 	fn instr_memory_grow(r: u32, ) -> Weight {
-		(73_584_000 as Weight)
+		Weight::from_ref_time(73_584_000 as RefTimeWeight)
 		// Standard Error: 353_000
-		.saturating_add((187_114_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64clz(r: u32, ) -> Weight {
-		(74_206_000 as Weight)
+		Weight::from_ref_time(74_206_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((884_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ctz(r: u32, ) -> Weight {
-		(73_992_000 as Weight)
+		Weight::from_ref_time(73_992_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((893_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64popcnt(r: u32, ) -> Weight {
-		(73_985_000 as Weight)
+		Weight::from_ref_time(73_985_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((891_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64eqz(r: u32, ) -> Weight {
-		(74_117_000 as Weight)
+		Weight::from_ref_time(74_117_000 as RefTimeWeight)
 		// Standard Error: 4_000
-		.saturating_add((901_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64extendsi32(r: u32, ) -> Weight {
-		(73_981_000 as Weight)
+		Weight::from_ref_time(73_981_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((866_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64extendui32(r: u32, ) -> Weight {
-		(74_104_000 as Weight)
+		Weight::from_ref_time(74_104_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((868_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i32wrapi64(r: u32, ) -> Weight {
-		(74_293_000 as Weight)
+		Weight::from_ref_time(74_293_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((878_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64eq(r: u32, ) -> Weight {
-		(74_055_000 as Weight)
+		Weight::from_ref_time(74_055_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_350_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ne(r: u32, ) -> Weight {
-		(73_710_000 as Weight)
+		Weight::from_ref_time(73_710_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64lts(r: u32, ) -> Weight {
-		(73_917_000 as Weight)
+		Weight::from_ref_time(73_917_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_355_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ltu(r: u32, ) -> Weight {
-		(74_048_000 as Weight)
+		Weight::from_ref_time(74_048_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64gts(r: u32, ) -> Weight {
-		(74_029_000 as Weight)
+		Weight::from_ref_time(74_029_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_349_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64gtu(r: u32, ) -> Weight {
-		(74_267_000 as Weight)
+		Weight::from_ref_time(74_267_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_353_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64les(r: u32, ) -> Weight {
-		(73_952_000 as Weight)
+		Weight::from_ref_time(73_952_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_350_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64leu(r: u32, ) -> Weight {
-		(73_851_000 as Weight)
+		Weight::from_ref_time(73_851_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_368_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64ges(r: u32, ) -> Weight {
-		(74_034_000 as Weight)
+		Weight::from_ref_time(74_034_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_348_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64geu(r: u32, ) -> Weight {
-		(73_979_000 as Weight)
+		Weight::from_ref_time(73_979_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((1_353_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64add(r: u32, ) -> Weight {
-		(74_000_000 as Weight)
+		Weight::from_ref_time(74_000_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_328_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64sub(r: u32, ) -> Weight {
-		(73_883_000 as Weight)
+		Weight::from_ref_time(73_883_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_331_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64mul(r: u32, ) -> Weight {
-		(74_216_000 as Weight)
+		Weight::from_ref_time(74_216_000 as RefTimeWeight)
 		// Standard Error: 5_000
-		.saturating_add((1_324_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64divs(r: u32, ) -> Weight {
-		(73_989_000 as Weight)
+		Weight::from_ref_time(73_989_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_998_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64divu(r: u32, ) -> Weight {
-		(73_857_000 as Weight)
+		Weight::from_ref_time(73_857_000 as RefTimeWeight)
 		// Standard Error: 4_000
-		.saturating_add((2_073_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rems(r: u32, ) -> Weight {
-		(73_801_000 as Weight)
+		Weight::from_ref_time(73_801_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((2_027_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64remu(r: u32, ) -> Weight {
-		(74_130_000 as Weight)
+		Weight::from_ref_time(74_130_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((2_064_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64and(r: u32, ) -> Weight {
-		(74_071_000 as Weight)
+		Weight::from_ref_time(74_071_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_327_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64or(r: u32, ) -> Weight {
-		(74_201_000 as Weight)
+		Weight::from_ref_time(74_201_000 as RefTimeWeight)
 		// Standard Error: 4_000
-		.saturating_add((1_330_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64xor(r: u32, ) -> Weight {
-		(74_241_000 as Weight)
+		Weight::from_ref_time(74_241_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_321_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shl(r: u32, ) -> Weight {
-		(74_331_000 as Weight)
+		Weight::from_ref_time(74_331_000 as RefTimeWeight)
 		// Standard Error: 6_000
-		.saturating_add((1_347_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shrs(r: u32, ) -> Weight {
-		(73_674_000 as Weight)
+		Weight::from_ref_time(73_674_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_359_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64shru(r: u32, ) -> Weight {
-		(73_807_000 as Weight)
+		Weight::from_ref_time(73_807_000 as RefTimeWeight)
 		// Standard Error: 2_000
-		.saturating_add((1_358_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rotl(r: u32, ) -> Weight {
-		(73_725_000 as Weight)
+		Weight::from_ref_time(73_725_000 as RefTimeWeight)
 		// Standard Error: 1_000
-		.saturating_add((1_358_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 	/// The range of component `r` is `[0, 50]`.
 	fn instr_i64rotr(r: u32, ) -> Weight {
-		(73_755_000 as Weight)
+		Weight::from_ref_time(73_755_000 as RefTimeWeight)
 		// Standard Error: 3_000
-		.saturating_add((1_360_000 as Weight).saturating_mul(r as Weight))
+		.saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 	}
 }
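// Aside: that closes the regenerated contracts weight file. The next file moves
// pallet-conviction-voting's benchmarks from `benchmarks!` to
// `benchmarks_instance_pallet!`, which threads an instance parameter `I`
// through every helper and storage access. A shape-only sketch of what that
// macro expects (hypothetical benchmark; `funded_account` and the pallet
// context are assumptions taken from the file below, not compilable alone):

use frame_benchmarking::benchmarks_instance_pallet;
use frame_support::dispatch::RawOrigin;

benchmarks_instance_pallet! {
	// Inside this block `T: Config<I>` and `I: 'static` are implicit, so
	// helpers and storage types take the pair, e.g. `funded_account::<T, I>`
	// or `VotingFor::<T, I>`; `_` in the call line reuses the benchmark name.
	example_bench {
		let caller = funded_account::<T, I>("caller", 0);
	}: _(RawOrigin::Signed(caller))
}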
diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs
index 53ac7a07302f9..117bb7fe22989 100644
--- a/frame/conviction-voting/src/benchmarking.rs
+++ b/frame/conviction-voting/src/benchmarking.rs
@@ -20,7 +20,7 @@ use super::*;
 
 use assert_matches::assert_matches;
-use frame_benchmarking::{account, benchmarks, whitelist_account};
+use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account};
 use frame_support::{
 	dispatch::RawOrigin,
 	traits::{fungible, Currency, Get},
@@ -34,8 +34,9 @@ const SEED: u32 = 0;
 
 /// Fill all classes as much as possible up to `MaxVotes` and return the Class with the most votes
 /// ongoing.
-fn fill_voting<T: Config>() -> (ClassOf<T>, BTreeMap<ClassOf<T>, Vec<IndexOf<T>>>) {
-	let mut r = BTreeMap::<ClassOf<T>, Vec<IndexOf<T>>>::new();
+fn fill_voting<T: Config<I>, I: 'static>(
+) -> (ClassOf<T, I>, BTreeMap<ClassOf<T, I>, Vec<IndexOf<T, I>>>) {
+	let mut r = BTreeMap::<ClassOf<T, I>, Vec<IndexOf<T, I>>>::new();
 	for class in T::Polls::classes().into_iter() {
 		for _ in 0..T::MaxVotes::get() {
 			match T::Polls::create_ongoing(class.clone()) {
@@ -48,34 +49,34 @@ fn fill_voting<T: Config>() -> (ClassOf<T>, BTreeMap<ClassOf<T>, Vec<IndexOf<T>>>) {
 	(c, r)
 }
 
-fn funded_account<T: Config>(name: &'static str, index: u32) -> T::AccountId {
+fn funded_account<T: Config<I>, I: 'static>(name: &'static str, index: u32) -> T::AccountId {
 	let caller: T::AccountId = account(name, index, SEED);
-	T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
+	T::Currency::make_free_balance_be(&caller, BalanceOf::<T, I>::max_value());
 	caller
 }
 
-fn account_vote<T: Config>(b: BalanceOf<T>) -> AccountVote<BalanceOf<T>> {
+fn account_vote<T: Config<I>, I: 'static>(b: BalanceOf<T, I>) -> AccountVote<BalanceOf<T, I>> {
 	let v = Vote { aye: true, conviction: Conviction::Locked1x };
 
 	AccountVote::Standard { vote: v, balance: b }
 }
 
-benchmarks! {
+benchmarks_instance_pallet! {
 	where_clause { where T::MaxVotes: core::fmt::Debug }
 
 	vote_new {
-		let caller = funded_account::<T>("caller", 0);
+		let caller = funded_account::<T, I>("caller", 0);
 		whitelist_account!(caller);
-		let account_vote = account_vote::<T>(100u32.into());
+		let account_vote = account_vote::<T, I>(100u32.into());
 
-		let (class, all_polls) = fill_voting::<T>();
+		let (class, all_polls) = fill_voting::<T, I>();
 		let polls = &all_polls[&class];
 		let r = polls.len() - 1;
 		// We need to create existing votes
 		for i in polls.iter().skip(1) {
-			ConvictionVoting::<T>::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?;
+			ConvictionVoting::<T, I>::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?;
 		}
-		let votes = match VotingFor::<T>::get(&caller, &class) {
+		let votes = match VotingFor::<T, I>::get(&caller, &class) {
 			Voting::Casting(Casting { votes, .. }) => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
@@ -85,52 +86,52 @@ benchmarks! {
 	}: vote(RawOrigin::Signed(caller.clone()), index, account_vote)
 	verify {
 		assert_matches!(
-			VotingFor::<T>::get(&caller, &class),
+			VotingFor::<T, I>::get(&caller, &class),
 			Voting::Casting(Casting { votes, .. }) if votes.len() == (r + 1) as usize
 		);
 	}
 
 	vote_existing {
-		let caller = funded_account::<T>("caller", 0);
+		let caller = funded_account::<T, I>("caller", 0);
 		whitelist_account!(caller);
-		let old_account_vote = account_vote::<T>(100u32.into());
+		let old_account_vote = account_vote::<T, I>(100u32.into());
 
-		let (class, all_polls) = fill_voting::<T>();
+		let (class, all_polls) = fill_voting::<T, I>();
 		let polls = &all_polls[&class];
 		let r = polls.len();
 		// We need to create existing votes
 		for i in polls.iter() {
-			ConvictionVoting::<T>::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?;
+			ConvictionVoting::<T, I>::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?;
 		}
-		let votes = match VotingFor::<T>::get(&caller, &class) {
+		let votes = match VotingFor::<T, I>::get(&caller, &class) {
 			Voting::Casting(Casting { votes, .. }) => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
 		assert_eq!(votes.len(), r, "Votes were not recorded.");
 
-		let new_account_vote = account_vote::<T>(200u32.into());
+		let new_account_vote = account_vote::<T, I>(200u32.into());
 		let index = polls[0];
 	}: vote(RawOrigin::Signed(caller.clone()), index, new_account_vote)
 	verify {
 		assert_matches!(
-			VotingFor::<T>::get(&caller, &class),
+			VotingFor::<T, I>::get(&caller, &class),
 			Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize
 		);
 	}
 
 	remove_vote {
-		let caller = funded_account::<T>("caller", 0);
+		let caller = funded_account::<T, I>("caller", 0);
 		whitelist_account!(caller);
-		let old_account_vote = account_vote::<T>(100u32.into());
+		let old_account_vote = account_vote::<T, I>(100u32.into());
 
-		let (class, all_polls) = fill_voting::<T>();
+		let (class, all_polls) = fill_voting::<T, I>();
 		let polls = &all_polls[&class];
 		let r = polls.len();
 		// We need to create existing votes
 		for i in polls.iter() {
-			ConvictionVoting::<T>::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?;
+			ConvictionVoting::<T, I>::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?;
 		}
-		let votes = match VotingFor::<T>::get(&caller, &class) {
+		let votes = match VotingFor::<T, I>::get(&caller, &class) {
 			Voting::Casting(Casting { votes, .. }) => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
@@ -140,25 +141,26 @@ benchmarks! {
 	}: _(RawOrigin::Signed(caller.clone()), Some(class.clone()), index)
 	verify {
 		assert_matches!(
-			VotingFor::<T>::get(&caller, &class),
+			VotingFor::<T, I>::get(&caller, &class),
 			Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize
 		);
 	}
 
 	remove_other_vote {
-		let caller = funded_account::<T>("caller", 0);
-		let voter = funded_account::<T>("caller", 0);
+		let caller = funded_account::<T, I>("caller", 0);
+		let voter = funded_account::<T, I>("caller", 0);
+		let voter_lookup = T::Lookup::unlookup(voter.clone());
 		whitelist_account!(caller);
-		let old_account_vote = account_vote::<T>(100u32.into());
+		let old_account_vote = account_vote::<T, I>(100u32.into());
 
-		let (class, all_polls) = fill_voting::<T>();
+		let (class, all_polls) = fill_voting::<T, I>();
 		let polls = &all_polls[&class];
 		let r = polls.len();
 		// We need to create existing votes
 		for i in polls.iter() {
-			ConvictionVoting::<T>::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?;
+			ConvictionVoting::<T, I>::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?;
 		}
-		let votes = match VotingFor::<T>::get(&caller, &class) {
+		let votes = match VotingFor::<T, I>::get(&caller, &class) {
 			Voting::Casting(Casting { votes, .. }) => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
@@ -166,10 +168,10 @@ benchmarks! {
 		let index = polls[0];
 		assert!(T::Polls::end_ongoing(index, false).is_ok());
-	}: _(RawOrigin::Signed(caller.clone()), voter.clone(), class.clone(), index)
+	}: _(RawOrigin::Signed(caller.clone()), voter_lookup, class.clone(), index)
 	verify {
 		assert_matches!(
-			VotingFor::<T>::get(&voter, &class),
+			VotingFor::<T, I>::get(&voter, &class),
 			Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize
 		);
 	}
@@ -177,78 +179,81 @@ benchmarks! {
 	delegate {
 		let r in 0 .. T::MaxVotes::get().min(T::Polls::max_ongoing().1);
 
-		let all_polls = fill_voting::<T>().1;
+		let all_polls = fill_voting::<T, I>().1;
 		let class = T::Polls::max_ongoing().0;
 		let polls = &all_polls[&class];
-		let voter = funded_account::<T>("voter", 0);
-		let caller = funded_account::<T>("caller", 0);
+		let voter = funded_account::<T, I>("voter", 0);
+		let voter_lookup = T::Lookup::unlookup(voter.clone());
+		let caller = funded_account::<T, I>("caller", 0);
 		whitelist_account!(caller);
 
-		let delegated_balance: BalanceOf<T> = 1000u32.into();
-		let delegate_vote = account_vote::<T>(delegated_balance);
+		let delegated_balance: BalanceOf<T, I> = 1000u32.into();
+		let delegate_vote = account_vote::<T, I>(delegated_balance);
 
 		// We need to create existing delegations
 		for i in polls.iter().take(r as usize) {
-			ConvictionVoting::<T>::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?;
+			ConvictionVoting::<T, I>::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?;
 		}
 		assert_matches!(
-			VotingFor::<T>::get(&voter, &class),
+			VotingFor::<T, I>::get(&voter, &class),
 			Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize
 		);
-	}: _(RawOrigin::Signed(caller.clone()), class.clone(), voter, Conviction::Locked1x, delegated_balance)
+	}: _(RawOrigin::Signed(caller.clone()), class.clone(), voter_lookup, Conviction::Locked1x, delegated_balance)
 	verify {
-		assert_matches!(VotingFor::<T>::get(&caller, &class), Voting::Delegating(_));
+		assert_matches!(VotingFor::<T, I>::get(&caller, &class), Voting::Delegating(_));
 	}
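// Aside: the new `voter_lookup` / `caller_lookup` values in these benchmarks
// exist because the dispatchables (see the lib.rs hunks further below) now
// accept a `StaticLookup::Source` instead of a bare `AccountId`. A minimal
// sketch of the round trip, assuming only `frame_system::Config`:

use sp_runtime::traits::{LookupError, StaticLookup};

// `unlookup` wraps an `AccountId` into whatever source type the configured
// `Lookup` expects (e.g. `MultiAddress::Id(..)`); `lookup` resolves it back.
fn round_trip<T: frame_system::Config>(who: T::AccountId) -> Result<T::AccountId, LookupError> {
	let source = <T::Lookup as StaticLookup>::unlookup(who);
	<T::Lookup as StaticLookup>::lookup(source)
}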
}) if votes.len() == r as usize ); - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); }: _(RawOrigin::Signed(caller.clone()), class.clone()) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); } unlock { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); + let caller_lookup = T::Lookup::unlookup(caller.clone()); whitelist_account!(caller); - let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); - let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); + let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); + let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); // Fill everything up to the max by filling all classes with votes and voting on them all. - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); assert!(all_polls.len() > 0); for (class, polls) in all_polls.iter() { assert!(polls.len() > 0); for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; } } @@ -257,15 +262,15 @@ benchmarks! { // Vote big on the class with the most ongoing votes of them to bump the lock and make it // hard to recompute when removed. - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; let now_usable = >::reducible_balance(&caller, false); assert_eq!(orig_usable - now_usable, 100u32.into()); // Remove the vote - ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; + ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; // We can now unlock on `class` from 200 to 100... 
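Every hunk above changes the same two things: the benchmark helpers and storage accesses gain the pallet-instance parameter (`::<T>` becoming `::<T, I>`), and call arguments are now built with `T::Lookup::unlookup`. A minimal sketch of an instance-generic helper, assuming the pallet's `Config<I>` trait and the `BalanceOf<T, I>` alias from the lib.rs hunk further down are in scope (the body mirrors the democracy helper shown later; treat it as illustrative, not the pallet's code):

```rust
use frame_benchmarking::account;
use frame_support::traits::Currency;
use sp_runtime::traits::Bounded;

const SEED: u32 = 0;

// Sketch: fund an account for benchmarks of any instance `I` of the pallet.
// `BalanceOf<T, I>` resolves to the currency type of that particular instance.
fn funded_account<T: Config<I>, I: 'static>(name: &'static str, index: u32) -> T::AccountId {
	let caller: T::AccountId = account(name, index, SEED);
	T::Currency::make_free_balance_be(&caller, BalanceOf::<T, I>::max_value());
	caller
}
```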
	unlock {
-		let caller = funded_account::<T>("caller", 0);
+		let caller = funded_account::<T, I>("caller", 0);
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
		whitelist_account!(caller);
-		let normal_account_vote = account_vote::<T>(T::Currency::free_balance(&caller) - 100u32.into());
-		let big_account_vote = account_vote::<T>(T::Currency::free_balance(&caller));
+		let normal_account_vote = account_vote::<T, I>(T::Currency::free_balance(&caller) - 100u32.into());
+		let big_account_vote = account_vote::<T, I>(T::Currency::free_balance(&caller));

		// Fill everything up to the max by filling all classes with votes and voting on them all.
-		let (class, all_polls) = fill_voting::<T>();
+		let (class, all_polls) = fill_voting::<T, I>();
		assert!(all_polls.len() > 0);
		for (class, polls) in all_polls.iter() {
			assert!(polls.len() > 0);
			for i in polls.iter() {
-				ConvictionVoting::<T>::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?;
+				ConvictionVoting::<T, I>::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?;
			}
		}
@@ -257,15 +262,15 @@ benchmarks! {
		// Vote big on the class with the most ongoing votes of them to bump the lock and make it
		// hard to recompute when removed.
-		ConvictionVoting::<T>::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?;
+		ConvictionVoting::<T, I>::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?;
		let now_usable = <T::Currency as fungible::Inspect<T::AccountId>>::reducible_balance(&caller, false);
		assert_eq!(orig_usable - now_usable, 100u32.into());

		// Remove the vote
-		ConvictionVoting::<T>::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?;
+		ConvictionVoting::<T, I>::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?;

		// We can now unlock on `class` from 200 to 100...
-	}: _(RawOrigin::Signed(caller.clone()), class, caller.clone())
+	}: _(RawOrigin::Signed(caller.clone()), class, caller_lookup)
	verify {
		assert_eq!(orig_usable, <T::Currency as fungible::Inspect<T::AccountId>>::reducible_balance(&caller, false));
	}
diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs
index 984f1c0b3a593..857878f190667 100644
--- a/frame/conviction-voting/src/lib.rs
+++ b/frame/conviction-voting/src/lib.rs
@@ -36,7 +36,7 @@ use frame_support::{
	},
};
use sp_runtime::{
-	traits::{AtLeast32BitUnsigned, Saturating, Zero},
+	traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero},
	ArithmeticError, Perbill,
};
use sp_std::prelude::*;
@@ -62,6 +62,7 @@ pub mod benchmarking;

const CONVICTION_VOTING_ID: LockIdentifier = *b"pyconvot";

+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
type BalanceOf<T, I = ()> =
	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type VotingOf<T, I = ()> = Voting<
@@ -245,11 +246,12 @@ pub mod pallet {
		pub fn delegate(
			origin: OriginFor<T>,
			class: ClassOf<T, I>,
-			to: T::AccountId,
+			to: AccountIdLookupOf<T>,
			conviction: Conviction,
			balance: BalanceOf<T, I>,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
+			let to = T::Lookup::lookup(to)?;
			let votes = Self::try_delegate(who, class, to, conviction, balance)?;

			Ok(Some(T::WeightInfo::delegate(votes)).into())
@@ -294,9 +296,10 @@ pub mod pallet {
		pub fn unlock(
			origin: OriginFor<T>,
			class: ClassOf<T, I>,
-			target: T::AccountId,
+			target: AccountIdLookupOf<T>,
		) -> DispatchResult {
			ensure_signed(origin)?;
+			let target = T::Lookup::lookup(target)?;
			Self::update_lock(&class, &target);
			Ok(())
		}
@@ -359,11 +362,12 @@ pub mod pallet {
		#[pallet::weight(T::WeightInfo::remove_other_vote())]
		pub fn remove_other_vote(
			origin: OriginFor<T>,
-			target: T::AccountId,
+			target: AccountIdLookupOf<T>,
			class: ClassOf<T, I>,
			index: PollIndexOf<T, I>,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
+			let target = T::Lookup::lookup(target)?;
			let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired };
			Self::try_remove_vote(&target, index, Some(class), scope)?;
			Ok(())
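The extrinsic changes above all follow one pattern: a parameter that used to be a bare `T::AccountId` becomes the `StaticLookup` source type, and the call body resolves it once before doing any work. A self-contained sketch of that calling convention (`example_call` is a hypothetical name, not a pallet method):

```rust
use frame_support::dispatch::DispatchResult;
use frame_system::pallet_prelude::OriginFor;
use sp_runtime::traits::StaticLookup;

// The alias introduced above: the "source" form of an account id, which on
// most runtimes is a `MultiAddress` (raw key, account index, 20/32-byte id, ...).
type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

// Hypothetical call: accept the lookup source, resolve it, then act on it.
fn example_call<T: frame_system::Config>(
	origin: OriginFor<T>,
	target: AccountIdLookupOf<T>,
) -> DispatchResult {
	frame_system::ensure_signed(origin)?;
	let _target: T::AccountId = <T as frame_system::Config>::Lookup::lookup(target)?;
	// ... operate on the resolved `AccountId` exactly as before ...
	Ok(())
}
```

Benchmarks go the other way with `T::Lookup::unlookup(account)`, which is why each benchmark now builds a `*_lookup` value before dispatching.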
diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs
index 9eb7f679efca3..cbd2b0619ac2b 100644
--- a/frame/conviction-voting/src/tests.rs
+++ b/frame/conviction-voting/src/tests.rs
@@ -22,6 +22,7 @@ use std::collections::BTreeMap;
use frame_support::{
	assert_noop, assert_ok, parameter_types,
	traits::{ConstU32, ConstU64, Contains, Polling, VoteTally},
+	weights::Weight,
};
use sp_core::H256;
use sp_runtime::{
@@ -57,7 +58,7 @@ impl Contains<Call> for BaseFilter {
parameter_types! {
	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1_000_000);
+		frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000));
}
impl frame_system::Config for Test {
	type BaseCallFilter = BaseFilter;
diff --git a/frame/conviction-voting/src/types.rs b/frame/conviction-voting/src/types.rs
index 8f4f3697e9766..d6051dff62569 100644
--- a/frame/conviction-voting/src/types.rs
+++ b/frame/conviction-voting/src/types.rs
@@ -93,6 +93,9 @@ impl<
		let ayes = approval.mul_ceil(support);
		Self { ayes, nays: support - ayes, support, dummy: PhantomData }
	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn setup(_: Class, _: Perbill) {}
}

impl<
diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs
index 330d02755cb8b..10c5c975a81f1 100644
--- a/frame/conviction-voting/src/weights.rs
+++ b/frame/conviction-voting/src/weights.rs
@@ -39,7 +39,7 @@
#![allow(unused_parens)]
#![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
use sp_std::marker::PhantomData;

/// Weight functions needed for pallet_conviction_voting.
@@ -62,9 +62,9 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
	// Storage: Balances Locks (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn vote_new() -> Weight {
-		(148_804_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(6 as Weight))
-			.saturating_add(T::DbWeight::get().writes(6 as Weight))
+		Weight::from_ref_time(148_804_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight))
	}
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
@@ -72,24 +72,24 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
	// Storage: Balances Locks (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn vote_existing() -> Weight {
-		(313_333_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(6 as Weight))
-			.saturating_add(T::DbWeight::get().writes(6 as Weight))
+		Weight::from_ref_time(313_333_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn remove_vote() -> Weight {
-		(300_591_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(300_591_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: Referenda ReferendumInfoFor (r:1 w:0)
	fn remove_other_vote() -> Weight {
-		(53_887_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(53_887_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:2 w:2)
	// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
@@ -97,33 +97,33 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn delegate(r: u32, ) -> Weight {
-		(51_518_000 as Weight)
+		Weight::from_ref_time(51_518_000 as RefTimeWeight)
		// Standard Error: 83_000
-			.saturating_add((27_235_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
	}
	// Storage: ConvictionVoting VotingFor (r:2 w:2)
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn undelegate(r: u32, ) -> Weight {
-		(37_885_000 as Weight)
+		Weight::from_ref_time(37_885_000 as RefTimeWeight)
		// Standard Error: 75_000
-			.saturating_add((24_395_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
	// Storage: Balances Locks (r:1 w:1)
	fn unlock() -> Weight {
-		(67_703_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(67_703_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
	}
}
@@ -135,9 +135,9 @@ impl WeightInfo for () {
	// Storage: Balances Locks (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn vote_new() -> Weight {
-		(148_804_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(6 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(6 as Weight))
+		Weight::from_ref_time(148_804_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight))
	}
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
@@ -145,24 +145,24 @@ impl WeightInfo for () {
	// Storage: Balances Locks (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn vote_existing() -> Weight {
-		(313_333_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(6 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(6 as Weight))
+		Weight::from_ref_time(313_333_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn remove_vote() -> Weight {
-		(300_591_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(300_591_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: Referenda ReferendumInfoFor (r:1 w:0)
	fn remove_other_vote() -> Weight {
-		(53_887_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(53_887_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
	}
	// Storage: ConvictionVoting VotingFor (r:2 w:2)
	// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
@@ -170,32 +170,32 @@ impl WeightInfo for () {
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn delegate(r: u32, ) -> Weight {
-		(51_518_000 as Weight)
+		Weight::from_ref_time(51_518_000 as RefTimeWeight)
		// Standard Error: 83_000
-			.saturating_add((27_235_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
	}
	// Storage: ConvictionVoting VotingFor (r:2 w:2)
	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
	// Storage: Scheduler Agenda (r:2 w:2)
	fn undelegate(r: u32, ) -> Weight {
-		(37_885_000 as Weight)
+		Weight::from_ref_time(37_885_000 as RefTimeWeight)
		// Standard Error: 75_000
-			.saturating_add((24_395_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight)))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
	}
	// Storage: ConvictionVoting VotingFor (r:1 w:1)
	// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
	// Storage: Balances Locks (r:1 w:1)
	fn unlock() -> Weight {
-		(67_703_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(67_703_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
	}
}
diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs
index ef0c15ec97e12..396c9781aefab 100644
--- a/frame/democracy/src/benchmarking.rs
+++ b/frame/democracy/src/benchmarking.rs
@@ -35,7 +35,9 @@ const SEED: u32 = 0;

fn funded_account<T: Config>(name: &'static str, index: u32) -> T::AccountId {
	let caller: T::AccountId = account(name, index, SEED);
-	T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
+	// Give the account half of the maximum value of the `Balance` type.
+	// Otherwise some transfers will fail with an overflow error.
+	T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value() / 2u32.into());
	caller
}
@@ -291,7 +293,9 @@ benchmarks! {
		for i in 0 .. T::MaxProposals::get() {
			add_proposal::<T>(i)?;
		}
-	}: _(RawOrigin::Root, 0)
+
+		let cancel_origin = T::CancelProposalOrigin::successful_origin();
+	}: _(cancel_origin, 0)

	cancel_referendum {
		let ref_index = add_referendum::<T>(0).0;
@@ -440,9 +444,10 @@ benchmarks! {
		let caller = funded_account::<T>("caller", 0);
		// Caller will initially delegate to `old_delegate`
		let old_delegate: T::AccountId = funded_account::<T>("old_delegate", r);
+		let old_delegate_lookup = T::Lookup::unlookup(old_delegate.clone());
		Democracy::<T>::delegate(
			RawOrigin::Signed(caller.clone()).into(),
-			old_delegate.clone(),
+			old_delegate_lookup,
			Conviction::Locked1x,
			delegated_balance,
		)?;
@@ -454,6 +459,7 @@
		assert_eq!(balance, delegated_balance, "delegation balance didn't work");
		// Caller will now switch to `new_delegate`
		let new_delegate: T::AccountId = funded_account::<T>("new_delegate", r);
+		let new_delegate_lookup = T::Lookup::unlookup(new_delegate.clone());
		let account_vote = account_vote::<T>(initial_balance);
		// We need to create existing direct votes for the `new_delegate`
		for i in 0..r {
@@ -466,7 +472,7 @@
		};
		assert_eq!(votes.len(), r as usize, "Votes were not recorded.");
		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()), new_delegate.clone(), Conviction::Locked1x, delegated_balance)
+	}: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance)
	verify {
		let (target, balance) = match VotingOf::<T>::get(&caller) {
			Voting::Delegating { target, balance, .. } => (target, balance),
@@ -490,9 +496,10 @@ benchmarks! {
		let caller = funded_account::<T>("caller", 0);
		// Caller will delegate
		let the_delegate: T::AccountId = funded_account::<T>("delegate", r);
+		let the_delegate_lookup = T::Lookup::unlookup(the_delegate.clone());
		Democracy::<T>::delegate(
			RawOrigin::Signed(caller.clone()).into(),
-			the_delegate.clone(),
+			the_delegate_lookup,
			Conviction::Locked1x,
			delegated_balance,
		)?;
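Two robustness fixes stand out in the democracy benchmark hunks above: accounts are funded with half of the balance type's maximum so later transfers cannot overflow, and `cancel_proposal` now dispatches from whatever origin the runtime's `CancelProposalOrigin` accepts rather than assuming root. A hedged sketch of the latter (`cancel_origin_for_bench` is a hypothetical helper, not part of the pallet):

```rust
use frame_support::traits::EnsureOrigin;

// Ask the configured origin-checker for an origin that is guaranteed to
// pass its own filter, instead of hard-coding `RawOrigin::Root`, which
// fails on runtimes that configure a different canceller.
fn cancel_origin_for_bench<T: pallet_democracy::Config>() -> T::Origin {
	T::CancelProposalOrigin::successful_origin()
}
```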
@@ -537,6 +544,7 @@
		let r in 0 .. (T::MaxVotes::get() - 1);

		let locker = funded_account::<T>("locker", 0);
+		let locker_lookup = T::Lookup::unlookup(locker.clone());
		// Populate votes so things are locked
		let base_balance: BalanceOf<T> = 100u32.into();
		let small_vote = account_vote::<T>(base_balance);
@@ -549,7 +557,7 @@ benchmarks! {
		let caller = funded_account::<T>("caller", 0);
		whitelist_account!(caller);
-	}: unlock(RawOrigin::Signed(caller), locker.clone())
+	}: unlock(RawOrigin::Signed(caller), locker_lookup)
	verify {
		// Note that we may want to add a `get_lock` api to actually verify
		let voting = VotingOf::<T>::get(&locker);
@@ -561,6 +569,7 @@ benchmarks! {
		let r in 0 .. (T::MaxVotes::get() - 1);

		let locker = funded_account::<T>("locker", 0);
+		let locker_lookup = T::Lookup::unlookup(locker.clone());
		// Populate votes so things are locked
		let base_balance: BalanceOf<T> = 100u32.into();
		let small_vote = account_vote::<T>(base_balance);
@@ -587,7 +596,7 @@ benchmarks! {
		let caller = funded_account::<T>("caller", 0);
		whitelist_account!(caller);
-	}: unlock(RawOrigin::Signed(caller), locker.clone())
+	}: unlock(RawOrigin::Signed(caller), locker_lookup)
	verify {
		let votes = match VotingOf::<T>::get(&locker) {
			Voting::Direct { votes, .. } => votes,
@@ -633,6 +642,7 @@ benchmarks! {
		let r in 1 .. T::MaxVotes::get();

		let caller = funded_account::<T>("caller", r);
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
		let account_vote = account_vote::<T>(100u32.into());

		for i in 0 .. r {
@@ -648,7 +658,7 @@ benchmarks! {
		let ref_index = r - 1;
		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()), caller.clone(), ref_index)
+	}: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index)
	verify {
		let votes = match VotingOf::<T>::get(&caller) {
			Voting::Direct { votes, .. } => votes,
diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs
index 3cbf845d3ac4c..562182ad311fc 100644
--- a/frame/democracy/src/lib.rs
+++ b/frame/democracy/src/lib.rs
@@ -164,8 +164,10 @@ use frame_support::{
	weights::Weight,
};
use sp_runtime::{
-	traits::{Bounded as ArithBounded, One, Saturating, Zero},
-	ArithmeticError, DispatchError, DispatchResult,
+	traits::{
+		Bounded, Bounded as ArithBounded, Dispatchable, Hash, One, Saturating, StaticLookup, Zero,
+	},
+	ArithmeticError, DispatchError, DispatchResult, RuntimeDebug,
};
use sp_std::prelude::*;
@@ -204,6 +206,7 @@ type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
>>::NegativeImbalance;
pub type CallOf<T> = <T as frame_system::Config>::Call;
pub type BoundedCallOf<T> = Bounded<CallOf<T>>;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

#[frame_support::pallet]
pub mod pallet {
@@ -525,6 +528,8 @@ pub mod pallet {
		MaxVotesReached,
		/// Maximum number of items reached.
		TooMany,
+		/// Voting period too low
+		VotingPeriodLow,
	}

	#[pallet::hooks]
@@ -719,8 +724,9 @@ pub mod pallet {
		/// The dispatch of this call must be `FastTrackOrigin`.
		///
		/// - `proposal_hash`: The hash of the current external proposal.
-		/// - `voting_period`: The period that is allowed for voting on this proposal. Increased to
-		///   `FastTrackVotingPeriod` if too low.
+		/// - `voting_period`: The period that is allowed for voting on this proposal.
+		/// 	Must be always greater than zero.
+		/// 	For `FastTrackOrigin` must be equal or greater than `FastTrackVotingPeriod`.
		/// - `delay`: The number of block after voting has ended in approval and this should be
		///   enacted. This doesn't have a minimum amount.
		///
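The doc change above reflects a behavioural change: `fast_track` no longer silently raises a too-small `voting_period` to `FastTrackVotingPeriod`; a zero period is now rejected with the new `VotingPeriodLow` error (see the `ensure!` in the next hunk). A standalone analogue of the guard, with a hypothetical error string standing in for the pallet's error variant:

```rust
use frame_support::ensure;

// Reject a zero-length voting period outright instead of silently
// bumping it to a configured minimum.
fn validate_voting_period(voting_period: u32) -> Result<(), &'static str> {
	ensure!(voting_period > 0, "VotingPeriodLow");
	Ok(())
}

#[test]
fn zero_voting_period_is_rejected() {
	assert!(validate_voting_period(0).is_err());
	assert!(validate_voting_period(1).is_ok());
}
```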
@@ -749,8 +755,8 @@ pub mod pallet {
				T::InstantOrigin::ensure_origin(ensure_instant)?;
				ensure!(T::InstantAllowed::get(), Error::<T>::InstantNotAllowed);
			}
-
-			let (ext_proposal, threshold) =
+			ensure!(voting_period > T::BlockNumber::zero(), Error::<T>::VotingPeriodLow);
+			let (e_proposal_hash, threshold) =
				<NextExternal<T>>::get().ok_or(Error::<T>::ProposalMissing)?;
			ensure!(
				threshold != VoteThreshold::SuperMajorityApprove,
@@ -847,11 +853,12 @@ pub mod pallet {
		#[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))]
		pub fn delegate(
			origin: OriginFor<T>,
-			to: T::AccountId,
+			to: AccountIdLookupOf<T>,
			conviction: Conviction,
			balance: BalanceOf<T>,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
+			let to = T::Lookup::lookup(to)?;
			let votes = Self::try_delegate(who, to, conviction, balance)?;

			Ok(Some(T::WeightInfo::delegate(votes)).into())
@@ -898,8 +905,9 @@ pub mod pallet {
		///
		/// Weight: `O(R)` with R number of vote of target.
		#[pallet::weight(T::WeightInfo::unlock_set().max(T::WeightInfo::unlock_remove()))]
-		pub fn unlock(origin: OriginFor<T>, target: T::AccountId) -> DispatchResult {
+		pub fn unlock(origin: OriginFor<T>, target: AccountIdLookupOf<T>) -> DispatchResult {
			ensure_signed(origin)?;
+			let target = T::Lookup::lookup(target)?;
			Self::update_lock(&target);
			Ok(())
		}
@@ -955,10 +963,11 @@ pub mod pallet {
		#[pallet::weight(T::WeightInfo::remove_other_vote())]
		pub fn remove_other_vote(
			origin: OriginFor<T>,
-			target: T::AccountId,
+			target: AccountIdLookupOf<T>,
			index: ReferendumIndex,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
+			let target = T::Lookup::lookup(target)?;
			let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired };
			Self::try_remove_vote(&target, index, scope)?;
			Ok(())
@@ -1498,7 +1507,7 @@ impl<T: Config> Pallet<T> {
	/// # </weight>
	fn begin_block(now: T::BlockNumber) -> Weight {
		let max_block_weight = T::BlockWeights::get().max_block;
-		let mut weight = 0;
+		let mut weight = Weight::new();

		let next = Self::lowest_unbaked();
		let last = Self::referendum_count();
diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs
new file mode 100644
index 0000000000000..8b137891791fe
--- /dev/null
+++ b/frame/democracy/src/migrations.rs
@@ -0,0 +1 @@
+
diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs
index 01ded5b3e9640..afb6053b383be 100644
--- a/frame/democracy/src/tests.rs
+++ b/frame/democracy/src/tests.rs
@@ -77,7 +77,7 @@ impl Contains<Call> for BaseFilter {
parameter_types! {
	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1_000_000_000_000);
+		frame_system::limits::BlockWeights::simple_max(frame_support::WEIGHT_PER_SECOND);
}
impl frame_system::Config for Test {
	type BaseCallFilter = BaseFilter;
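`begin_block` above switches its accumulator from a bare integer to the typed `Weight` (`Weight::new()` is the zero weight), the same shift the test mock makes by constructing `BlockWeights` from a `Weight` value. A small, self-contained analogue of the accumulation pattern:

```rust
use frame_support::weights::Weight;

// With the opaque `Weight` type, bookkeeping uses saturating helpers
// instead of plain integer arithmetic.
fn accumulate(per_item: Weight, items: u64) -> Weight {
	let mut weight = Weight::new(); // zero
	for _ in 0..items {
		weight = weight.saturating_add(per_item);
	}
	weight
}
```

For instance, `accumulate(Weight::from_ref_time(3_171_000), 4)` mirrors how per-referendum costs add up in the weight functions further down.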
diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs
index b508b44129815..a939491d73293 100644
--- a/frame/democracy/src/tests/external_proposing.rs
+++ b/frame/democracy/src/tests/external_proposing.rs
@@ -27,7 +27,7 @@ fn veto_external_works() {
		assert!(<NextExternal<Test>>::exists());

		let h = set_balance_proposal(2).hash();
-		assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone()));
+		assert_ok!(Democracy::veto_external(Origin::signed(3), h));
		// cancelled.
		assert!(!<NextExternal<Test>>::exists());
		// fails - same proposal can't be resubmitted.
@@ -49,13 +49,10 @@ fn veto_external_works() {
		assert!(<NextExternal<Test>>::exists());

		// 3 can't veto the same thing twice.
-		assert_noop!(
-			Democracy::veto_external(Origin::signed(3), h.clone()),
-			Error::<Test>::AlreadyVetoed
-		);
+		assert_noop!(Democracy::veto_external(Origin::signed(3), h), Error::<Test>::AlreadyVetoed);

		// 4 vetoes.
-		assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone()));
+		assert_ok!(Democracy::veto_external(Origin::signed(4), h));
		// cancelled again.
		assert!(!<NextExternal<Test>>::exists());
diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs
index 2d70836a2a849..3db91f83a5a88 100644
--- a/frame/democracy/src/tests/fast_tracking.rs
+++ b/frame/democracy/src/tests/fast_tracking.rs
@@ -67,6 +67,10 @@ fn instant_referendum_works() {
			Error::<Test>::InstantNotAllowed
		);
		INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true);
+		assert_noop!(
+			Democracy::fast_track(Origin::signed(6), h, 0, 0),
+			Error::<Test>::VotingPeriodLow
+		);
		assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0));
		assert_eq!(
			Democracy::referendum_status(0),
@@ -81,6 +85,60 @@ fn instant_referendum_works() {
	});
}

+#[test]
+fn instant_next_block_referendum_backed() {
+	new_test_ext().execute_with(|| {
+		// arrange
+		let start_block_number = 10;
+		let majority_origin_id = 3;
+		let instant_origin_id = 6;
+		let voting_period = 1;
+		let proposal_hash = set_balance_proposal_hash_and_note(2);
+		let delay = 2; // has no effect on test
+
+		// init
+		System::set_block_number(start_block_number);
+		InstantAllowed::set(true);
+
+		// propose with majority origin
+		assert_ok!(Democracy::external_propose_majority(
+			Origin::signed(majority_origin_id),
+			proposal_hash
+		));
+
+		// fast track with instant origin and voting period pointing to the next block
+		assert_ok!(Democracy::fast_track(
+			Origin::signed(instant_origin_id),
+			proposal_hash,
+			voting_period,
+			delay
+		));
+
+		// fetch the status of the only referendum at index 0
+		assert_eq!(
+			Democracy::referendum_status(0),
+			Ok(ReferendumStatus {
+				end: start_block_number + voting_period,
+				proposal_hash,
+				threshold: VoteThreshold::SimpleMajority,
+				delay,
+				tally: Tally { ayes: 0, nays: 0, turnout: 0 },
+			})
+		);
+
+		// referendum expected to be baked with the start of the next block
+		next_block();
+
+		// assert no active referendums
+		assert_noop!(Democracy::referendum_status(0), Error::<Test>::ReferendumInvalid);
+		// the only referendum in the storage is finished and not approved
+		assert_eq!(
+			ReferendumInfoOf::<Test>::get(0).unwrap(),
+			ReferendumInfo::Finished { approved: false, end: start_block_number + voting_period }
+		);
+	});
+}
+
#[test]
fn fast_track_referendum_fails_when_no_simple_majority() {
	new_test_ext().execute_with(|| {
diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs
index c38b04aff913c..b1a25df111b80 100644
--- a/frame/democracy/src/tests/public_proposals.rs
+++ b/frame/democracy/src/tests/public_proposals.rs
@@ -107,7 +107,7 @@ fn blacklisting_should_work() {
		assert_ok!(propose_set_balance(1, 2, 2));
		assert_ok!(propose_set_balance(1, 4, 4));

-		assert_noop!(Democracy::blacklist(Origin::signed(1), hash.clone(), None), BadOrigin);
+		assert_noop!(Democracy::blacklist(Origin::signed(1), hash, None), BadOrigin);
		assert_ok!(Democracy::blacklist(Origin::root(), hash, None));

		assert_eq!(Democracy::backing_for(0), None);
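The test cleanups above (`h.clone()` → `h`, `hash.clone()` → `hash`) work because `sp_core::H256` implements `Copy`: passing it by value copies it implicitly, so the explicit clones were redundant. A tiny illustration:

```rust
use sp_core::H256;

fn consume(h: H256) -> H256 {
	h
}

fn main() {
	let h = H256::zero();
	let a = consume(h); // `h` is copied, not moved...
	let b = consume(h); // ...so it can be passed again without cloning
	assert_eq!(a, b);
}
```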
diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs
index f1fce9a9a7b20..351ed42cca8e9 100644
--- a/frame/democracy/src/weights.rs
+++ b/frame/democracy/src/weights.rs
@@ -39,34 +39,37 @@
#![allow(unused_parens)]
#![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
use sp_std::marker::PhantomData;

/// Weight functions needed for pallet_democracy.
pub trait WeightInfo {
	fn propose() -> Weight;
-	fn second() -> Weight;
-	fn vote_new() -> Weight;
-	fn vote_existing() -> Weight;
+	fn second(s: u32, ) -> Weight;
+	fn vote_new(r: u32, ) -> Weight;
+	fn vote_existing(r: u32, ) -> Weight;
	fn emergency_cancel() -> Weight;
-	fn blacklist() -> Weight;
-	fn external_propose() -> Weight;
+	fn blacklist(p: u32, ) -> Weight;
+	fn external_propose(v: u32, ) -> Weight;
	fn external_propose_majority() -> Weight;
	fn external_propose_default() -> Weight;
	fn fast_track() -> Weight;
-	fn veto_external() -> Weight;
-	fn cancel_proposal() -> Weight;
+	fn veto_external(v: u32, ) -> Weight;
+	fn cancel_proposal(p: u32, ) -> Weight;
	fn cancel_referendum() -> Weight;
-	fn cancel_queued() -> Weight;
-	fn on_initialize_base(r: u32,) -> Weight;
-	fn on_initialize_base_with_launch_period(r: u32,) -> Weight;
-	fn delegate(v: u32) -> Weight;
-	fn undelegate(v: u32) -> Weight;
+	fn cancel_queued(r: u32, ) -> Weight;
+	fn on_initialize_base(r: u32, ) -> Weight;
+	fn on_initialize_base_with_launch_period(r: u32, ) -> Weight;
+	fn delegate(r: u32, ) -> Weight;
+	fn undelegate(r: u32, ) -> Weight;
	fn clear_public_proposals() -> Weight;
-	fn unlock_remove() -> Weight;
-	fn unlock_set() -> Weight;
-	fn remove_vote() -> Weight;
-	fn remove_other_vote() -> Weight;
+	fn note_preimage(b: u32, ) -> Weight;
+	fn note_imminent_preimage(b: u32, ) -> Weight;
+	fn reap_preimage(b: u32, ) -> Weight;
+	fn unlock_remove(r: u32, ) -> Weight;
+	fn unlock_set(r: u32, ) -> Weight;
+	fn remove_vote(r: u32, ) -> Weight;
+	fn remove_other_vote(r: u32, ) -> Weight;
}

/// Weights for pallet_democracy using the Substrate node and recommended hardware.
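Besides the `RefTimeWeight` import, the trait above regains the benchmark parameters (`s`, `r`, `p`, `v`, `b`) in its signatures and re-adds the preimage functions, so linear per-component costs can be expressed per call. The body changes in the impl hunks that follow are mechanical; a reduced before/after sketch using the `blacklist` constants from below (illustrative, not the generated file):

```rust
use frame_support::weights::{RefTimeWeight, Weight};

// Old shape: `Weight` was a plain integer alias, so weights were written
// as casts, e.g. `(57_708_000 as Weight).saturating_add(...)`.

// New shape: `Weight` is constructed from a reference-time value, and the
// per-component cost scales with the benchmark parameter `p`.
fn blacklist_weight(p: u64) -> Weight {
	Weight::from_ref_time(57_708_000 as RefTimeWeight)
		.saturating_add(
			Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight),
		)
}
```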
@@ -77,43 +80,44 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (48_328_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(48_328_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second() -> Weight { - (30_923_000 as Weight) + fn second(s: u32, ) -> Weight { + Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new() -> Weight { - (40_345_000 as Weight) + fn vote_new(r: u32, ) -> Weight { + Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((140_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing() -> Weight { - (39_853_000 as Weight) + fn vote_existing(r: u32, ) -> Weight { + Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((150_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (19_364_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_364_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -121,83 +125,83 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:0 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn blacklist() -> Weight { - (57_708_000 as Weight) + fn blacklist(p: u32, ) -> Weight { + Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((192_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(192_000 as 
RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose() -> Weight { - (10_714_000 as Weight) + fn external_propose(v: u32, ) -> Weight { + Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((33_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (3_697_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_697_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (3_831_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_831_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (20_271_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(20_271_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external() -> Weight { - (21_319_000 as Weight) + fn veto_external(v: u32, ) -> Weight { + Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal() -> Weight { - (43_960_000 as Weight) + fn cancel_proposal(p: u32, ) -> Weight { + Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((184_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (13_475_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_475_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } 
// Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued() -> Weight { - (24_320_000 as Weight) + fn cancel_queued(r: u32, ) -> Weight { + Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((560_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) - fn on_initialize_base(_r: u32,) -> Weight { - (3_428_000 as Weight) + fn on_initialize_base(r: u32, ) -> Weight { + Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_171_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -205,79 +209,104 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) - fn on_initialize_base_with_launch_period(_r: u32,) -> Weight { - (7_867_000 as Weight) + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_177_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn delegate(_v: u32,) -> Weight { - (37_902_000 as Weight) + fn delegate(r: u32, ) -> Weight { + Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((4_335_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight))) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + 
.saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) - fn undelegate(_v: u32,) -> Weight { - (21_272_000 as Weight) + fn undelegate(r: u32, ) -> Weight { + Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((4_351_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight))) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (4_913_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(4_913_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(27_986_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(20_058_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account (r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(28_619_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn unlock_remove() -> Weight { - (26_619_000 as Weight) + fn unlock_remove(r: u32, ) -> Weight { + Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn unlock_set() -> Weight { - (25_373_000 as Weight) + fn unlock_set(r: u32, ) -> Weight { + Weight::from_ref_time(25_373_000 
as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - fn remove_vote() -> Weight { - (15_961_000 as Weight) + fn remove_vote(r: u32, ) -> Weight { + Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - fn remove_other_vote() -> Weight { - (15_992_000 as Weight) + fn remove_other_vote(r: u32, ) -> Weight { + Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((113_000 as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -288,44 +317,44 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (48_328_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(48_328_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second() -> Weight { - (30_923_000 as Weight) + fn second(s: u32, ) -> Weight { + Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new() -> Weight { - (40_345_000 as Weight) + fn vote_new(r: u32, ) -> Weight { + Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((140_000 as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) 
} // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing() -> Weight { - (39_853_000 as Weight) + fn vote_existing(r: u32, ) -> Weight { + Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((150_000 as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (19_364_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_364_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -333,83 +362,83 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:0 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn blacklist() -> Weight { - (57_708_000 as Weight) + fn blacklist(p: u32, ) -> Weight { + Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((192_000 as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose() -> Weight { - (10_714_000 as Weight) + fn external_propose(v: u32, ) -> Weight { + Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((33_000 as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (3_697_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_697_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (3_831_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_831_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (20_271_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as 
Weight)) + Weight::from_ref_time(20_271_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external(v: u32, ) -> Weight { - (21_319_000 as Weight) + fn veto_external(v: u32, ) -> Weight { + Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal(p: u32, ) -> Weight { - (43_960_000 as Weight) + fn cancel_proposal(p: u32, ) -> Weight { + Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((184_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (13_475_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_475_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued(r: u32, ) -> Weight { - (24_320_000 as Weight) + fn cancel_queued(r: u32, ) -> Weight { + Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((560_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) - fn on_initialize_base(r: u32, ) -> Weight { - (3_428_000 as Weight) + fn on_initialize_base(r: u32, ) -> Weight { + Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_171_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -417,78 +446,103 @@ impl WeightInfo for
() { // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) - fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - (7_867_000 as Weight) + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_177_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn delegate(r: u32, ) -> Weight { - (37_902_000 as Weight) + fn delegate(r: u32, ) -> Weight { + Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((4_335_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) - fn undelegate(r: u32, ) -> Weight { - (21_272_000 as Weight) + fn undelegate(r: u32, ) -> Weight { + Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((4_351_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (4_913_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(4_913_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(27_986_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(20_058_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account (r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + Weight::from_ref_time(28_619_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn unlock_remove(r: u32, ) -> Weight { - (26_619_000 as Weight) + fn unlock_remove(r: u32, ) -> Weight { + Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - fn unlock_set(r: u32, ) -> Weight { - (25_373_000 as Weight) + fn unlock_set(r: u32, ) -> Weight { + Weight::from_ref_time(25_373_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - fn remove_vote(r: u32, ) -> Weight { - (15_961_000 as Weight) + fn remove_vote(r: u32, ) -> Weight { + Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - fn remove_other_vote(r: u32, ) -> Weight { - (15_992_000 as Weight) + fn remove_other_vote(r: u32, ) -> Weight { + Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((113_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(113_000 as
RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 6c8e537a72c64..aea3b5cd6d2b0 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -36,16 +36,11 @@ frame-election-provider-support = { version = "4.0.0-dev", default-features = fa # Optional imports for benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } pallet-election-provider-support-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support/benchmarking", optional = true } -rand = { version = "0.7.3", default-features = false, optional = true, features = [ - "alloc", - "small_rng", -] } -strum = { optional = true, default-features = false, version = "0.23.0", features = [ - "derive", -] } +rand = { version = "0.7.3", default-features = false, features = ["alloc", "small_rng"], optional = true } +strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = { version = "0.7.3" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 2f1f6463df719..906de8a6c9d20 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -51,7 +51,7 @@ //! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A //! deposit is reserved, based on the size of the solution, for the cost of keeping this solution //! on-chain for a number of blocks, and the potential weight of the solution upon being checked. A -//! maximum of `pallet::Config::MaxSignedSubmissions` solutions are stored. The queue is always +//! maximum of `pallet::Config::SignedMaxSubmissions` solutions are stored. The queue is always //! sorted based on score (worse to best). //! //! Upon arrival of a new solution: @@ -234,7 +234,6 @@ use frame_election_provider_support::{ ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, }; use frame_support::{ - dispatch::DispatchResultWithPostInfo, ensure, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, weights::{DispatchClass, Weight}, @@ -877,7 +876,7 @@ pub mod pallet { origin: OriginFor, raw_solution: Box>>, witness: SolutionOrSnapshotSize, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_none(origin)?; let error_message = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward."; @@ -905,7 +904,7 @@ pub mod pallet { prev_ejected: ejected_a_solution, }); - Ok(None.into()) + Ok(()) } /// Set a new value for `MinimumUntrustedScore`. 
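The return-type changes in this file all follow one rule: a dispatchable only needs `DispatchResultWithPostInfo` if it may correct its weight after execution; `submit_unsigned` never does, so it drops to plain `DispatchResult`. A minimal standalone sketch of the two shapes (not code from this diff; the function names are placeholders):

```rust
use frame_support::dispatch::{DispatchResult, DispatchResultWithPostInfo};
use frame_support::weights::{Pays, PostDispatchInfo};

// A call that may refund reports the weight it actually used. `actual_weight:
// None` means "charge the full pre-dispatch weight" -- exactly what the old
// `Ok(None.into())` spelled out.
fn call_with_refund() -> DispatchResultWithPostInfo {
    Ok(PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes })
}

// A call that never refunds needs only the plain result type; the block is
// charged the static weight declared on the call.
fn call_without_refund() -> DispatchResult {
    Ok(())
}
```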
@@ -992,7 +991,7 @@ pub mod pallet { let deposit = Self::deposit_for(&raw_solution, size); let call_fee = { let call = Call::submit { raw_solution: raw_solution.clone() }; - T::EstimateCallFee::estimate_call_fee(&call, None.into()) + T::EstimateCallFee::estimate_call_fee(&call, None::.into()) }; let submission = SignedSubmission { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 7eff70b47eba5..72b3ec9764079 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -349,9 +349,11 @@ impl MinerConfig for Runtime { fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { match MockWeightInfo::get() { - MockedWeightInfo::Basic => - (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)), - MockedWeightInfo::Complex => (0 * v + 0 * t + 1000 * a + 0 * d) as Weight, + MockedWeightInfo::Basic => Weight::from_ref_time( + (10 as u64).saturating_add((5 as u64).saturating_mul(a as u64)), + ), + MockedWeightInfo::Complex => + Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64), MockedWeightInfo::Real => <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d), } diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index eca75139f925a..b9abfdfba14fb 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -514,8 +514,8 @@ impl Pallet { let feasibility_weight = Self::solution_weight_of(raw_solution, size); let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); - let weight_deposit = - T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); + let weight_deposit = T::SignedDepositWeight::get() + .saturating_mul(feasibility_weight.ref_time().saturated_into()); T::SignedDepositBase::get() .saturating_add(len_deposit) @@ -957,7 +957,7 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(40) + .signed_weight(Weight::from_ref_time(40)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(15); @@ -971,13 +971,13 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); + assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), 40); + assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(raw.clone()))); - ::set(30); + ::set(Weight::from_ref_time(30)); // note: resubmitting the same solution is technically okay as long as the queue has // space. 
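The test churn above is mechanical: bare `u64` weights become `Weight` values. A tiny standalone sketch of the API surface these changes rely on, mirroring the mock's `Basic` cost formula (10 base plus 5 per assignment, hence the `35` asserted for 5 edges); illustrative only:

```rust
use frame_support::weights::Weight;

// 10 base + 5 per assignment, as in `MockedWeightInfo::Basic` above.
fn basic_cost(assignments: u64) -> Weight {
    Weight::from_ref_time(10)
        .saturating_add(Weight::from_ref_time(5).scalar_saturating_mul(assignments))
}

fn demo() {
    let w = basic_cost(5);
    assert_eq!(w, Weight::from_ref_time(35)); // `Weight` compares as a value...
    assert_eq!(w.ref_time(), 35); // ...and exposes its scalar via `ref_time()`.
}
```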
diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index de25355f0ca5b..8ef7d87473159 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -699,54 +699,153 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2990), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2999), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3000), 3); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 3); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 5500), 5); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 7777), 7); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 9999), 9); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_000), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_999), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 11_000), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 22_000), 10); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::zero()), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2990)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2999)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3000)), + 3 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 3 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(5500)), + 5 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(7777)), + 7 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(9999)), + 9 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_000)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, 
w, Weight::from_ref_time(10_999)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(11_000)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(22_000)), + 10 + ); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 1 + ); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 2 + ); + assert_eq!( + 
Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 2 + ); } } @@ -1024,7 +1123,7 @@ mod tests { #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(100) + .miner_weight(Weight::from_ref_time(100)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(25); @@ -1038,11 +1137,11 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); + assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(25); + ::set(Weight::from_ref_time(25)); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( @@ -1052,7 +1151,7 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 25); + assert_eq!(solution_weight, Weight::from_ref_time(25)); assert_eq!(raw.solution.voter_count(), 3); }) } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 68ce00dd0de32..7ceb4a20e042a 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_multi_phase. @@ -68,46 +68,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (13_495_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) + Weight::from_ref_time(13_495_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (14_114_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(14_114_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (13_756_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_756_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (28_467_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_467_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 
w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (21_991_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_991_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - (3_186_000 as Weight) + Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -119,13 +119,13 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (137_653_000 as Weight) + Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -134,9 +134,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - (49_313_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(49_313_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -146,33 +146,33 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + 
.saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) } } @@ -187,46 +187,46 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (13_495_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + Weight::from_ref_time(13_495_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (14_114_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(14_114_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (13_756_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_756_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (28_467_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_467_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (21_991_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_991_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - (3_186_000 as Weight) + Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -238,13 +238,13 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (137_653_000 as Weight) + Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -253,9 +253,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - (49_313_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(49_313_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -265,32 +265,32 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) } } diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index c603b196519b5..4f9e47b09a4da 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_support_benchmarking. 
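For readers skimming the generated files: every weight function in these diffs follows the same template, now expressed through `Weight` constructors instead of bare integers. A hand-written illustration of that shape (not generated output; the trait name and all numbers are made up):

```rust
use frame_support::weights::{constants::RocksDbWeight, RefTimeWeight, Weight};

pub trait ExampleWeightInfo {
    fn do_something(n: u32) -> Weight;
}

// The `()` impl mirrors the "backwards compatibility and tests" impls above.
impl ExampleWeightInfo for () {
    fn do_something(n: u32) -> Weight {
        // measured base cost
        Weight::from_ref_time(10_000 as RefTimeWeight)
            // per-component slope: the benchmarked marginal cost of each `n`
            .saturating_add(
                Weight::from_ref_time(1_000 as RefTimeWeight)
                    .scalar_saturating_mul(n as RefTimeWeight),
            )
            // storage accesses priced through the backend's DB weight constants
            .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
            .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
    }
}
```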
@@ -53,43 +53,43 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 0f195f12cce3c..d71a74f76a114 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -30,6 +30,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = 
"../balances" } sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-tracing = { path = "../../primitives/tracing" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 44dd6aff09f0c..22d00a912a4f7 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -22,26 +22,19 @@ use super::*; use frame_benchmarking::{account, benchmarks, whitelist, BenchmarkError, BenchmarkResult}; -use frame_support::{ - dispatch::{DispatchResultWithPostInfo, UnfilteredDispatchable}, - traits::OnInitialize, -}; +use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; -const MAX_VOTERS: u32 = 500; -const MAX_CANDIDATES: u32 = 200; - -type Lookup = <::Lookup as StaticLookup>::Source; /// grab new account with infinite balance. fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); // Fund each account with at-least his stake but still a sane amount as to not mess up // the vote calculation. - let amount = default_stake::(MAX_VOTERS) * BalanceOf::::from(BALANCE_FACTOR); + let amount = default_stake::(T::MaxVoters::get()) * BalanceOf::::from(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. @@ -51,7 +44,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. -fn as_lookup(account: T::AccountId) -> Lookup { +fn as_lookup(account: T::AccountId) -> AccountIdLookupOf { T::Lookup::unlookup(account) } @@ -235,7 +228,7 @@ benchmarks! { submit_candidacy { // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -266,7 +259,7 @@ benchmarks! { // this will check members, runners-up and candidate for removal. Members and runners-up are // limited by the runtime bound, nonetheless we fill them by `m`. // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -345,38 +338,6 @@ benchmarks! { ))?; } - // -- Root ones - #[extra] // this calls into phragmen and consumes a full block for now. - remove_member_without_replacement_extra { - // worse case is when we remove a member and we have no runner as a replacement. This - // triggers phragmen again. The only parameter is how many candidates will compete for the - // new slot. - let c in 1 .. MAX_CANDIDATES; - clean::(); - - // fill only desired members. no runners-up. - let all_members = fill_seats_up_to::(T::DesiredMembers::get())?; - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - - // submit a new one to compensate, with self-vote. - let replacements = submit_candidates_with_self_vote::(c, "new_candidate")?; - - // create some voters for these replacements. 
- distribute_voters::(replacements, MAX_VOTERS, MAXIMUM_VOTE)?; - - let to_remove = as_lookup::(all_members[0].clone()); - }: remove_member(RawOrigin::Root, to_remove, false) - verify { - // must still have the desired number of members members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - remove_member_with_replacement { // easy case. We have a runner up. Nothing will have that much of an impact. m will be // number of members and runners. There is always at least one runner. @@ -385,7 +346,7 @@ benchmarks! { let _ = fill_seats_up_to::(m)?; let removing = as_lookup::(>::members_ids()[0].clone()); - }: remove_member(RawOrigin::Root, removing, true) + }: remove_member(RawOrigin::Root, removing, true, false) verify { // must still have enough members. assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); @@ -397,49 +358,16 @@ benchmarks! { } } - remove_member_wrong_refund { - // The root call by mistake indicated that this will have no replacement, while it has! - // this has now consumed a lot of weight and need to refund. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - clean::(); - - let _ = fill_seats_up_to::(m)?; - let removing = as_lookup::(>::members_ids()[0].clone()); - let who = T::Lookup::lookup(removing.clone()).expect("member was added above"); - let call = Call::::remove_member { who: removing, has_replacement: false }.encode(); - }: { - assert_eq!( - as Decode>::decode(&mut &*call) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(RawOrigin::Root.into()) - .unwrap_err() - .error, - Error::::InvalidReplacement.into(), - ); - } - verify { - // must still have enough members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - // on fail, `who` must still be a member - assert!(>::members_ids().contains(&who)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - clean_defunct_voters { // total number of voters. - let v in (MAX_VOTERS / 2) .. MAX_VOTERS; + let v in (T::MaxVoters::get() / 2) .. T::MaxVoters::get(); // those that are defunct and need removal. - let d in 1 .. (MAX_VOTERS / 2); + let d in 1 .. (T::MaxVoters::get() / 2); // remove any previous stuff. clean::(); - let all_candidates = submit_candidates::(v, "candidates")?; + let all_candidates = submit_candidates::(T::MaxCandidates::get(), "candidates")?; distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; // all candidates leave. @@ -459,9 +387,9 @@ benchmarks! { // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. // Yet, change the number of voters, candidates and edge per voter to see the impact. Note // that we give all candidates a self vote to make sure they are all considered. - let c in 1 .. MAX_CANDIDATES; - let v in 1 .. MAX_VOTERS; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let c in 1 .. T::MaxCandidates::get(); + let v in 1 .. T::MaxVoters::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() as u32 * MAXIMUM_VOTE as u32; clean::(); // so we have a situation with v and e. we want e to basically always be in the range of `e @@ -474,7 +402,7 @@ benchmarks! 
{ let votes_per_voter = (e / v).min(MAXIMUM_VOTE as u32); let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; + let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; }: { >::on_initialize(T::TermDuration::get()); } @@ -495,15 +423,19 @@ benchmarks! { #[extra] election_phragmen_c_e { - let c in 1 .. MAX_CANDIDATES; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; - let fixed_v = MAX_VOTERS; + let c in 1 .. T::MaxCandidates::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() * MAXIMUM_VOTE as u32; + let fixed_v = T::MaxVoters::get(); clean::(); let votes_per_voter = e / fixed_v; let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, fixed_v, votes_per_voter as usize)?; + let _ = distribute_voters::( + all_candidates, + fixed_v - c, + votes_per_voter as usize, + )?; }: { >::on_initialize(T::TermDuration::get()); } @@ -525,7 +457,7 @@ benchmarks! { #[extra] election_phragmen_v { let v in 4 .. 16; - let fixed_c = MAX_CANDIDATES; + let fixed_c = T::MaxCandidates::get() / 10; let fixed_e = 64; clean::(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index ec2234cde5a6e..539e90d0179ff 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -100,7 +100,6 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::WithPostDispatchInfo, traits::{ defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, @@ -177,6 +176,8 @@ pub struct SeatHolder { pub use pallet::*; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -249,6 +250,21 @@ pub mod pallet { #[pallet::constant] type TermDuration: Get; + /// The maximum number of candidates in a phragmen election. + /// + /// Warning: The election happens onchain, and this value will determine + /// the size of the election. When this limit is reached no more + /// candidates are accepted in the election. + #[pallet::constant] + type MaxCandidates: Get; + + /// The maximum number of voters to allow in a phragmen election. + /// + /// Warning: This impacts the size of the election which is run onchain. + /// When the limit is reached the new voters are ignored. + #[pallet::constant] + type MaxVoters: Get; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -263,7 +279,7 @@ pub mod pallet { if !term_duration.is_zero() && (n % term_duration).is_zero() { Self::do_phragmen() } else { - 0 + Weight::zero() } } } @@ -347,7 +363,7 @@ pub mod pallet { T::Currency::set_lock(T::PalletId::get(), &who, locked_stake, WithdrawReasons::all()); Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); - Ok(None.into()) + Ok(None::.into()) } /// Remove `origin` as a voter. @@ -356,11 +372,11 @@ pub mod pallet { /// /// The dispatch origin of this call must be signed and be a voter. 
#[pallet::weight(T::WeightInfo::remove_voter())] - pub fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn remove_voter(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); Self::do_remove_voter(&who); - Ok(None.into()) + Ok(()) } /// Submit oneself for candidacy. A fixed amount of deposit is recorded. @@ -382,11 +398,15 @@ pub mod pallet { pub fn submit_candidacy( origin: OriginFor, #[pallet::compact] candidate_count: u32, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let who = ensure_signed(origin)?; - let actual_count = >::decode_len().unwrap_or(0); - ensure!(actual_count as u32 <= candidate_count, Error::::InvalidWitnessData); + let actual_count = >::decode_len().unwrap_or(0) as u32; + ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); + ensure!( + actual_count <= ::MaxCandidates::get(), + Error::::TooManyCandidates + ); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -397,7 +417,7 @@ pub mod pallet { .map_err(|_| Error::::InsufficientCandidateFunds)?; >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); - Ok(None.into()) + Ok(()) } /// Renounce one's intention to be a candidate for the next election round. 3 potential @@ -423,10 +443,7 @@ pub mod pallet { Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), })] - pub fn renounce_candidacy( - origin: OriginFor, - renouncing: Renouncing, - ) -> DispatchResultWithPostInfo { + pub fn renounce_candidacy(origin: OriginFor, renouncing: Renouncing) -> DispatchResult { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { @@ -462,14 +479,18 @@ pub mod pallet { })?; }, }; - Ok(None.into()) + Ok(()) } /// Remove a particular member from the set. This is effective immediately and the bond of /// the outgoing member is slashed. /// /// If a runner-up is available, then the best runner-up will be removed and replaces the - /// outgoing member. Otherwise, a new phragmen election is started. + /// outgoing member. Otherwise, if `rerun_election` is `true`, a new phragmen election is + /// started, else, nothing happens. + /// + /// If `slash_bond` is set to true, the bond of the member being removed is slashed. Else, + /// it is returned. /// /// The dispatch origin of this call must be root. /// @@ -479,38 +500,29 @@ pub mod pallet { /// If we have a replacement, we use a small weight. Else, since this is a root call and /// will go into phragmen, we assume full block for now. /// # - #[pallet::weight(if *has_replacement { - T::WeightInfo::remove_member_with_replacement() - } else { + #[pallet::weight(if *rerun_election { T::WeightInfo::remove_member_without_replacement() + } else { + T::WeightInfo::remove_member_with_replacement() })] pub fn remove_member( origin: OriginFor, - who: ::Source, - has_replacement: bool, - ) -> DispatchResultWithPostInfo { + who: AccountIdLookupOf, + slash_bond: bool, + rerun_election: bool, + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; - let will_have_replacement = >::decode_len().map_or(false, |l| l > 0); - if will_have_replacement != has_replacement { - // In both cases, we will change more weight than need. Refund and abort. - return Err(Error::::InvalidReplacement.with_weight( - // refund. The weight value comes from a benchmark which is special to this. 
- T::WeightInfo::remove_member_wrong_refund(), - )) - } - - let had_replacement = Self::remove_and_replace_member(&who, true)?; - debug_assert_eq!(has_replacement, had_replacement); + let _ = Self::remove_and_replace_member(&who, slash_bond)?; Self::deposit_event(Event::MemberKicked { member: who }); - if !had_replacement { + if rerun_election { Self::do_phragmen(); } // no refund needed. - Ok(None.into()) + Ok(()) } /// Clean all voters who are defunct (i.e. they do not serve any purpose at all). The @@ -528,13 +540,13 @@ pub mod pallet { origin: OriginFor<T>, _num_voters: u32, _num_defunct: u32, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let _ = ensure_root(origin)?; <Voting<T>>::iter() .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) .for_each(|(dv, _)| Self::do_remove_voter(&dv)); - Ok(None.into()) + Ok(()) } } @@ -585,10 +597,10 @@ pub mod pallet { UnableToPayBond, /// Must be a voter. MustBeVoter, - /// Cannot report self. - ReportSelf, /// Duplicated candidate submission. DuplicatedCandidate, + /// Too many candidates have been created. + TooManyCandidates, /// Member cannot re-submit candidacy. MemberSubmit, /// Runner cannot re-submit candidacy. @@ -893,7 +905,7 @@ impl<T: Config> Pallet<T> { if candidates_and_deposit.len().is_zero() { Self::deposit_event(Event::EmptyTerm); - return T::DbWeight::get().reads(5) + return T::DbWeight::get().reads(3) } // All of the new winners that come out of phragmen will thus have a deposit recorded. @@ -906,10 +918,29 @@ impl<T: Config> Pallet<T> { let to_balance = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); let mut num_edges: u32 = 0; + + let max_voters = <T as Config>::MaxVoters::get() as usize; // used for prime election. - let voters_and_stakes = Voting::<T>::iter() - .map(|(voter, Voter { stake, votes, .. })| (voter, stake, votes)) - .collect::<Vec<_>>(); + let mut voters_and_stakes = Vec::new(); + match Voting::<T>::iter().try_for_each(|(voter, Voter { stake, votes, .. })| { + if voters_and_stakes.len() < max_voters { + voters_and_stakes.push((voter, stake, votes)); + Ok(()) + } else { + Err(()) + } + }) { + Ok(_) => (), + Err(_) => { + log::error!( + target: "runtime::elections-phragmen", + "Failed to run election. Number of voters exceeded", + ); + Self::deposit_event(Event::ElectionError); + return T::DbWeight::get().reads(3 + max_voters as u64) + }, + } + // used for phragmen. let voters_and_votes = voters_and_stakes .iter() @@ -1137,13 +1168,13 @@ mod tests { use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, ModuleError, + BuildStorage, }; use substrate_test_utils::assert_eq_uvec; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { @@ -1242,6 +1273,8 @@ mod tests { parameter_types! { pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; + pub const PhragmenMaxVoters: u32 = 1000; + pub const PhragmenMaxCandidates: u32 = 100; } impl Config for Test { @@ -1260,6 +1293,8 @@ mod tests { type LoserCandidate = (); type KickedMember = (); type WeightInfo = (); + type MaxVoters = PhragmenMaxVoters; + type MaxCandidates = PhragmenMaxCandidates; } pub type Block = sp_runtime::generic::Block<Header, UncheckedExtrinsic>; @@ -1306,9 +1341,7 @@ mod tests { self } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { - MEMBERS.with(|m| { - *m.borrow_mut() = members.iter().map(|(m, _)| m.clone()).collect::<Vec<_>>() - }); + MEMBERS.with(|m| *m.borrow_mut() = members.iter().map(|(m, _)| *m).collect::<Vec<_>>()); self.genesis_members = members; self } @@ -1321,9 +1354,9 @@ mod tests { self } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + sp_tracing::try_init_simple(); MEMBERS.with(|m| { - *m.borrow_mut() = - self.genesis_members.iter().map(|(m, _)| m.clone()).collect::<Vec<_>>() + *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| *m).collect::<Vec<_>>() }); let mut ext: sp_io::TestExternalities = GenesisConfig { balances: pallet_balances::GenesisConfig::<Test> { @@ -1452,7 +1485,7 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: Origin) -> DispatchResultWithPostInfo { + fn submit_candidacy(origin: Origin) -> sp_runtime::DispatchResult { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } @@ -2494,7 +2527,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_member(Origin::root(), 4, false)); + assert_ok!(Elections::remove_member(Origin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round @@ -2502,52 +2535,6 @@ mod tests { }); } - #[test] - fn remove_member_should_indicate_replacement() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4, 5]); - - // no replacement yet. - let unwrapped_error = Elections::remove_member(Origin::root(), 4, true).unwrap_err(); - assert!(matches!( - unwrapped_error.error, - DispatchError::Module(ModuleError { message: Some("InvalidReplacement"), .. }) - )); - assert!(unwrapped_error.post_info.actual_weight.is_some()); - }); - - ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(runners_up_ids(), vec![3]); - - // there is a replacement! and this one needs a weight refund. - let unwrapped_error = Elections::remove_member(Origin::root(), 4, false).unwrap_err(); - assert!(matches!( - unwrapped_error.error, - DispatchError::Module(ModuleError { message: Some("InvalidReplacement"), .. }) - )); - assert!(unwrapped_error.post_info.actual_weight.is_some()); - }); - } - #[test] fn seats_should_be_released_when_no_vote() { ExtBuilder::default().build_and_execute(|| { @@ -2684,7 +2671,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::root(), 2, true)); + assert_ok!(Elections::remove_member(Origin::root(), 2, true, false)); assert_eq!(members_ids(), vec![4, 5]); }); }
diff --git a/frame/elections-phragmen/src/migrations/v3.rs b/frame/elections-phragmen/src/migrations/v3.rs index b1cdd4be98541..9ec9c6e7eea6c 100644 --- a/frame/elections-phragmen/src/migrations/v3.rs +++ b/frame/elections-phragmen/src/migrations/v3.rs @@ -101,14 +101,14 @@ pub fn apply<T: V2ToV3>( StorageVersion::new(3).put::<Pallet<T>>(); - Weight::max_value() + Weight::MAX } else { log::warn!( target: "runtime::elections-phragmen", "Attempted to apply migration to V3 but failed because storage version is {:?}", storage_version, ); - 0 + Weight::zero() } }
diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs index e0fc17ec2a12d..76ef630706c50 100644 --- a/frame/elections-phragmen/src/migrations/v4.rs +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -38,7 +38,7 @@ pub fn migrate<T: Config, N: AsRef<str>>(new_pallet_name: N) -> Weight { target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0 + return Weight::zero() } let storage_version = StorageVersion::get::<Pallet<T>>(); log::info!( @@ -63,7 +63,7 @@ pub fn migrate<T: Config, N: AsRef<str>>(new_pallet_name: N) -> Weight { "Attempted to apply migration to v4 but failed because storage version is {:?}", storage_version, ); - 0 + Weight::zero() } }
diff --git a/frame/elections-phragmen/src/migrations/v5.rs b/frame/elections-phragmen/src/migrations/v5.rs index a9fb018ba0219..eb96d7ddf6a99 100644 --- a/frame/elections-phragmen/src/migrations/v5.rs +++ b/frame/elections-phragmen/src/migrations/v5.rs @@ -8,7 +8,7 @@ use super::super::*; /// situation where they could increase their free balance but still not be able to use their funds /// because they were less than the lock. pub fn migrate<T: Config>(to_migrate: Vec<T::AccountId>) -> Weight { - let mut weight = 0; + let mut weight = Weight::new(); for who in to_migrate.iter() { if let Ok(mut voter) = Voting::<T>::try_get(who) {
diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 5ad986bf142a3..14c69bf16f7f1 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,28 +18,30 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-08-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //!
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_elections_phragmen // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_elections_phragmen +// --chain=dev // --output=./frame/elections-phragmen/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_elections_phragmen. @@ -54,7 +56,6 @@ pub trait WeightInfo { fn renounce_candidacy_runners_up() -> Weight; fn remove_member_without_replacement() -> Weight; fn remove_member_with_replacement() -> Weight; - fn remove_member_wrong_refund() -> Weight; fn clean_defunct_voters(v: u32, d: u32, ) -> Weight; fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight; } @@ -67,61 +68,66 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (27_798_000 as Weight) - // Standard Error: 5_000 - .saturating_add((238_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_011_000 as RefTimeWeight) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (41_241_000 as Weight) - // Standard Error: 8_000 - .saturating_add((259_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(40_240_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. 
fn vote_less(v: u32, ) -> Weight { - (41_313_000 as Weight) - // Standard Error: 8_000 - .saturating_add((255_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(40_394_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (39_218_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_651_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) + /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (41_348_000 as Weight) - // Standard Error: 1_000 - .saturating_add((112_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_217_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (35_522_000 as Weight) - // Standard Error: 1_000 - .saturating_add((92_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(46_459_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -129,72 +135,69 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (47_887_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_189_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (36_271_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_516_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - (2_000_000_000_000 as Weight) + Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) } - // Storage: Elections RunnersUp (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) // Storage: Council Prime (r:1 w:1) // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (55_024_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(51_838_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } - // Storage: Elections RunnersUp (r:1 w:0) - fn remove_member_wrong_refund() -> Weight { - (13_089_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } - // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Candidates (r:1 w:0) - // Storage: Balances Locks (r:250 w:250) - // Storage: System Account (r:250 w:250) - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 40_000 - .saturating_add((51_848_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 38_000 - .saturating_add((537_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + // Storage: Balances Locks (r:5000 w:5000) + // Storage: System Account (r:5000 w:5000) + /// The range of component `v` is `[5000, 
10000]`. + /// The range of component `d` is `[1, 5000]`. + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 76_000 + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) - // Storage: Elections Voting (r:502 w:0) + // Storage: Elections Voting (r:10001 w:0) // Storage: Council Proposals (r:1 w:0) // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. + /// The range of component `v` is `[1, 10000]`. + /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_664_000 - .saturating_add((30_736_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 692_000 - .saturating_add((49_739_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 47_000 - .saturating_add((3_363_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 773_000 + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + // Standard Error: 51_000 + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(280 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) } } @@ -205,61 +208,66 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[1, 16]`. 
fn vote_equal(v: u32, ) -> Weight { - (27_798_000 as Weight) - // Standard Error: 5_000 - .saturating_add((238_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_011_000 as RefTimeWeight) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (41_241_000 as Weight) - // Standard Error: 8_000 - .saturating_add((259_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(40_240_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (41_313_000 as Weight) - // Standard Error: 8_000 - .saturating_add((255_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(40_394_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (39_218_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_651_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) + /// The range of component `c` is `[1, 1000]`. 
fn submit_candidacy(c: u32, ) -> Weight { - (41_348_000 as Weight) - // Standard Error: 1_000 - .saturating_add((112_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_217_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (35_522_000 as Weight) - // Standard Error: 1_000 - .saturating_add((92_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(46_459_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -267,71 +275,68 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (47_887_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_189_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (36_271_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_516_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - (2_000_000_000_000 as Weight) + Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) } - // Storage: Elections RunnersUp (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) // Storage: Council Prime (r:1 w:1) // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (55_024_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(51_838_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } - // Storage: Elections RunnersUp (r:1 w:0) - fn remove_member_wrong_refund() -> Weight { - (13_089_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - } - // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Candidates (r:1 w:0) - // 
Storage: Balances Locks (r:250 w:250) - // Storage: System Account (r:250 w:250) - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 40_000 - .saturating_add((51_848_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 38_000 - .saturating_add((537_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + // Storage: Balances Locks (r:5000 w:5000) + // Storage: System Account (r:5000 w:5000) + /// The range of component `v` is `[5000, 10000]`. + /// The range of component `d` is `[1, 5000]`. + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 76_000 + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) - // Storage: Elections Voting (r:502 w:0) + // Storage: Elections Voting (r:10001 w:0) // Storage: Council Proposals (r:1 w:0) // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. + /// The range of component `v` is `[1, 10000]`. + /// The range of component `e` is `[10000, 160000]`. 
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 1_664_000 - .saturating_add((30_736_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 692_000 - .saturating_add((49_739_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 47_000 - .saturating_add((3_363_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 773_000 + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + // Standard Error: 51_000 + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(280 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) } } diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index d7b933577ead5..93e14f358208e 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_system::RawOrigin; // To actually run this benchmark on pallet-example-basic, we need to put this pallet into the // runtime and compile it with `runtime-benchmarks` feature. The detail procedures are // documented at: -// https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark +// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/ // // The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. // The exact command of how the estimate generated is printed at the top of the file. diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index f8acc1962388f..ad46bdc4185bd 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -318,7 +318,7 @@ const MILLICENTS: u32 = 1_000_000_000; // - assigns a dispatch class `operational` if the argument of the call is more than 1000. // // More information can be read at: -// - https://docs.substrate.io/v3/runtime/weights-and-fees +// - https://docs.substrate.io/main-docs/build/tx-weights-fees/ // // Manually configuring weight is an advanced operation and what you really need may well be // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. @@ -329,7 +329,7 @@ impl WeighData<(&BalanceOf,)> for WeightForSetDum let multiplier = self.0; // *target.0 is the amount passed into the extrinsic let cents = *target.0 / >::from(MILLICENTS); - (cents * multiplier).saturated_into::() + Weight::from_ref_time((cents * multiplier).saturated_into::()) } } @@ -392,7 +392,7 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. - 0 + Weight::zero() } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. 
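The change running through every weights file in this patch is mechanical: bare u64 weight values become explicit Weight values, so 0 turns into Weight::zero(), literals are wrapped in Weight::from_ref_time, and per-component scaling goes through scalar_saturating_mul instead of plain multiplication. A minimal sketch of the post-migration shape of a weight function follows; the function name and constants are illustrative placeholders, not benchmarked output.

use frame_support::weights::{constants::RocksDbWeight, RefTimeWeight, Weight};

// Illustrative only: a weight function in the new explicit-Weight style.
fn example_weight(v: u32) -> Weight {
	// The base cost is wrapped in Weight::from_ref_time instead of being a bare u64.
	Weight::from_ref_time(27_000_000 as RefTimeWeight)
		// Per-item costs are scaled with scalar_saturating_mul, not `*`.
		.saturating_add(
			Weight::from_ref_time(214_000 as RefTimeWeight)
				.scalar_saturating_mul(v as RefTimeWeight),
		)
		// Storage costs still come from the DB weight constants, now fed RefTimeWeight casts.
		.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
		.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
}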
diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 0f659e12fb443..f6afb7a0c77f1 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -52,7 +52,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -190,7 +190,7 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight > 0); + assert!(info1.weight > Weight::zero()); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index 5fc6434e396eb..e8fc44bc4b050 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -49,7 +49,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. @@ -63,39 +63,39 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_dummy_benchmark(b: u32, ) -> Weight { - (5_834_000 as Weight) - .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_834_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { - (51_353_000 as Weight) - .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(51_353_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { - (2_569_000 as Weight) + Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn set_dummy_benchmark(b: u32, ) -> Weight { - (5_834_000 as Weight) - .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_834_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { - (51_353_000 as Weight) - .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) - 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(51_353_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { - (2_569_000 as Weight) + Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) } } diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index e5bd9fabc629b..5b03614333cc9 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -53,7 +53,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -253,10 +253,9 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { ) .unwrap(); - let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap() - .clone(); + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); @@ -313,10 +312,9 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { ) .unwrap(); - let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap() - .clone(); + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); diff --git a/frame/executive/README.md b/frame/executive/README.md index e96d07b0843f2..c14c3912b082d 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -56,7 +56,7 @@ struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { // Do whatever you want. - 0 + frame_support::weights::Weight::zero() } } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c40fdf94806aa..45361084f2f42 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -107,7 +107,7 @@ //! impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { //! fn on_runtime_upgrade() -> frame_support::weights::Weight { //! // Do whatever you want. -//! 0 +//! frame_support::weights::Weight::zero() //! } //! } //! 
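The same typed-weight rule applies to runtime-upgrade hooks, as the executive doc example above shows: a handler that used to return 0 now has to build a Weight explicitly. A minimal sketch of a custom handler under the new type, mirroring the document's own CustomOnRuntimeUpgrade example; the cost of 100 is an arbitrary placeholder.

use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

struct CustomOnRuntimeUpgrade;
impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade {
	fn on_runtime_upgrade() -> Weight {
		// Start from an empty weight and accumulate; returning a bare 0
		// no longer compiles once Weight stops being a u64 alias.
		let mut weight = Weight::new();
		weight = weight.saturating_add(Weight::from_ref_time(100));
		weight
	}
}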
@@ -123,12 +123,12 @@ use frame_support::{ EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; use sp_runtime::{ generic::Digest, traits::{ - self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, Saturating, + self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, @@ -299,7 +299,7 @@ where // This means the format of all the event related storages must always be compatible. >::reset_events(); - let mut weight = 0; + let mut weight = Weight::new(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } @@ -413,7 +413,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight > 0 { + if remaining_weight > Weight::zero() { let used_weight = >::on_idle( block_number, remaining_weight, @@ -573,7 +573,7 @@ mod tests { use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; - const TEST_KEY: &[u8] = &*b":test:key:"; + const TEST_KEY: &[u8] = b":test:key:"; #[frame_support::pallet] mod custom { @@ -593,12 +593,12 @@ mod tests { // one with block number arg and one without fn on_initialize(n: T::BlockNumber) -> Weight { println!("on_initialize({})", n); - 175 + Weight::from_ref_time(175) } fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { println!("on_idle{}, {})", n, remaining_weight); - 175 + Weight::from_ref_time(175) } fn on_finalize(n: T::BlockNumber) { @@ -607,7 +607,7 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 200 + Weight::from_ref_time(200) } fn offchain_worker(n: T::BlockNumber) { @@ -721,9 +721,9 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::builder() - .base_block(10) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) + .base_block(Weight::from_ref_time(10)) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, @@ -806,7 +806,7 @@ mod tests { type TestUncheckedExtrinsic = TestXt; // Will contain `true` when the custom runtime logic was called. 
- const CUSTOM_ON_RUNTIME_KEY: &[u8] = &*b":custom:on_runtime"; + const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; struct CustomOnRuntimeUpgrade; impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { @@ -814,7 +814,7 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); - 100 + Weight::from_ref_time(100) } } @@ -988,12 +988,12 @@ mod tests { sign_extra(1, 0, 0), ); let encoded = xt.encode(); - let encoded_len = encoded.len() as Weight; + let encoded_len = encoded.len() as u64; // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); - let base_block_weight = 175 + block_weights.base_block; + let base_block_weight = Weight::from_ref_time(175) + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit / (encoded_len + 5); + let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( 1, @@ -1016,7 +1016,7 @@ mod tests { assert_eq!( >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight - (encoded_len + 5) * (nonce + 1) + base_block_weight, + Weight::from_ref_time((encoded_len + 5) * (nonce + 1)) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -1047,8 +1047,8 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = - 175 + ::BlockWeights::get().base_block; + let base_block_weight = Weight::from_ref_time(175) + + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -1066,7 +1066,7 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. - let extrinsic_weight = len as Weight + + let extrinsic_weight = Weight::from_ref_time(len as u64) + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -1180,7 +1180,10 @@ mod tests { // NOTE: might need updates over time if new weights are introduced. // For now it only accounts for the base block execution weight and // the `on_initialize` weight defined in the custom test module. - assert_eq!(>::block_weight().total(), 175 + 175 + 10); + assert_eq!( + >::block_weight().total(), + Weight::from_ref_time(175 + 175 + 10) + ); }) } diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 59522f9a106f2..b94b7d164f04c 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -335,7 +335,7 @@ pub mod pallet { if (n % T::IntakePeriod::get()).is_zero() { Self::pursue_target(T::MaxIntakeBids::get()) } else { - 0 + Weight::zero() } } } diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 952080a2d030b..3d2b629e8b16b 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_gilt. 
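Where the old code did integer arithmetic directly on weights, as in the block-exhaustion computation in the executive tests above, the scalar now has to be extracted with ref_time() first, and ordering checks compare against Weight::zero(). A sketch of both idioms under those assumptions; the helper names and the per_extrinsic parameter are stand-ins for the test's local values.

use frame_support::weights::Weight;

// How many identical extrinsics fit in the given block limit.
fn num_to_exhaust_block(limit: Weight, per_extrinsic: u64) -> u64 {
	// Weight no longer supports bare integer division, so divide the
	// inner ref-time component instead.
	limit.ref_time() / per_extrinsic
}

fn has_capacity(remaining_weight: Weight) -> bool {
	// Comparisons are made against Weight::zero(), not the literal 0.
	remaining_weight > Weight::zero()
}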
@@ -60,70 +60,70 @@ impl WeightInfo for SubstrateWeight { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - (41_605_000 as Weight) + Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - (97_715_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(97_715_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - (42_061_000 as Weight) + Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - (5_026_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_026_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - (47_753_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_753_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - (1_663_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_663_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - (40_797_000 as Weight) + Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as 
RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - (14_944_000 as Weight) + Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) } } @@ -132,69 +132,69 @@ impl WeightInfo for () { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - (41_605_000 as Weight) + Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - (97_715_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(97_715_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - (42_061_000 as Weight) + Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - (5_026_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_026_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - (47_753_000 as 
Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_753_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - (1_663_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_663_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - (40_797_000 as Weight) + Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - (14_944_000 as Weight) + Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) } } diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 330e9bb255177..f21c3ddc101f7 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -35,7 +35,7 @@ impl crate::WeightInfo for () { // checking membership proof (35 * WEIGHT_PER_MICROS) - .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + .saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof .saturating_add(95 * WEIGHT_PER_MICROS) diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 804272c20480f..181d22fba545c 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -353,9 +353,9 @@ impl Offence self.time_slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> 
Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs index ab43f7baef4e9..81dbd3bab4b67 100644 --- a/frame/grandpa/src/migrations/v4.rs +++ b/frame/grandpa/src/migrations/v4.rs @@ -37,7 +37,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::afg", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0 + return Weight::zero() } let storage_version = StorageVersion::get::>(); log::info!( @@ -57,7 +57,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { ::BlockWeights::get().max_block } else { - 0 + Weight::zero() } } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 5e6c955c441c5..d246466cf0db4 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -71,7 +71,7 @@ impl_opaque_keys! { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index ab0a9c677b00e..9c39069bf9538 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,7 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight > 0); + assert!(info.weight > Weight::zero()); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index b225db4edfa91..5d409f48bf567 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -23,7 +23,10 @@ use super::*; use crate::Pallet as Identity; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; -use frame_support::{ensure, traits::Get}; +use frame_support::{ + ensure, + traits::{EnsureOrigin, Get}, +}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -37,8 +40,10 @@ fn assert_last_event(generic_event: ::Event) { fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); - Identity::::add_registrar(RawOrigin::Root.into(), registrar.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, registrar_lookup)?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; let fields = IdentityFields( @@ -114,7 +119,9 @@ benchmarks! { add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); - }: _(RawOrigin::Root, account("registrar", r + 1, SEED)) + let origin = T::RegistrarOrigin::successful_origin(); + let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); + }: _(origin, account) verify { ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); } @@ -125,7 +132,7 @@ benchmarks! 
{ let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -256,10 +263,12 @@ benchmarks! { set_fee { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); }: _(RawOrigin::Signed(caller), r, 100u32.into()) @@ -270,14 +279,17 @@ benchmarks! { set_account_id { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); - }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) + let new_account = T::Lookup::unlookup(account("new", 0, SEED)); + }: _(RawOrigin::Signed(caller), r, new_account) verify { let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); @@ -285,11 +297,13 @@ benchmarks! { set_fields { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller_lookup)?; let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter @@ -310,6 +324,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; @@ -318,7 +333,8 @@ benchmarks! { Identity::::set_identity(user_origin.clone(), Box::new(info))?; }; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller_lookup)?; Identity::::request_judgement(user_origin, r, 10u32.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { @@ -332,7 +348,7 @@ benchmarks! 
{ let target: T::AccountId = account("target", 0, SEED); let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); - let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let target_lookup = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); let info = create_identity_info::(x); @@ -350,7 +366,8 @@ benchmarks! { )?; } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); - }: _(RawOrigin::Root, target_lookup) + let origin = T::ForceOrigin::successful_origin(); + }: _(origin, target_lookup) verify { ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 46f847606903d..0f80acceb949c 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -94,6 +94,7 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -282,9 +283,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] pub fn add_registrar( origin: OriginFor, - account: T::AccountId, + account: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; + let account = T::Lookup::lookup(account)?; let (i, registrar_count) = >::try_mutate( |registrars| -> Result<(RegistrarIndex, usize), DispatchError> { @@ -672,9 +674,10 @@ pub mod pallet { pub fn set_account_id( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, - new: T::AccountId, + new: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + let new = T::Lookup::lookup(new)?; let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) @@ -760,7 +763,7 @@ pub mod pallet { pub fn provide_judgement( origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, - target: ::Source, + target: AccountIdLookupOf, judgement: Judgement>, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; @@ -827,7 +830,7 @@ pub mod pallet { ))] pub fn kill_identity( origin: OriginFor, - target: ::Source, + target: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; @@ -863,7 +866,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] pub fn add_sub( origin: OriginFor, - sub: ::Source, + sub: AccountIdLookupOf, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -898,7 +901,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] pub fn rename_sub( origin: OriginFor, - sub: ::Source, + sub: AccountIdLookupOf, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -917,10 +920,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
 		#[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))]
-		pub fn remove_sub(
-			origin: OriginFor<T>,
-			sub: <T::Lookup as StaticLookup>::Source,
-		) -> DispatchResult {
+		pub fn remove_sub(origin: OriginFor<T>, sub: AccountIdLookupOf<T>) -> DispatchResult {
 			let sender = ensure_signed(origin)?;
 			ensure!(IdentityOf::<T>::contains_key(&sender), Error::<T>::NoIdentity);
 			let sub = T::Lookup::lookup(sub)?;
diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs
index e2c10feefa9ff..391dd7e7ae8fd 100644
--- a/frame/identity/src/tests.rs
+++ b/frame/identity/src/tests.rs
@@ -50,7 +50,7 @@ frame_support::construct_runtime!(
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1024);
+		frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = frame_support::traits::Everything;
diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs
index 7d3371c31b03b..780abf1bb01df 100644
--- a/frame/identity/src/weights.rs
+++ b/frame/identity/src/weights.rs
@@ -40,7 +40,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 /// Weight functions needed for pallet_identity.
@@ -69,48 +69,48 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Identity Registrars (r:1 w:1)
 	/// The range of component `r` is `[1, 19]`.
 	fn add_registrar(r: u32, ) -> Weight {
-		(16_649_000 as Weight)
+		Weight::from_ref_time(16_649_000 as RefTimeWeight)
 			// Standard Error: 5_000
-			.saturating_add((241_000 as Weight).saturating_mul(r as Weight))
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Identity IdentityOf (r:1 w:1)
 	/// The range of component `r` is `[1, 20]`.
 	/// The range of component `x` is `[1, 100]`.
 	fn set_identity(r: u32, x: u32, ) -> Weight {
-		(31_322_000 as Weight)
+		Weight::from_ref_time(31_322_000 as RefTimeWeight)
 			// Standard Error: 10_000
-			.saturating_add((252_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
 			// Standard Error: 1_000
-			.saturating_add((312_000 as Weight).saturating_mul(x as Weight))
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Identity IdentityOf (r:1 w:0)
 	// Storage: Identity SubsOf (r:1 w:1)
 	// Storage: Identity SuperOf (r:1 w:1)
 	/// The range of component `s` is `[1, 100]`.
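// ----------------------------------------------------------------------------
// Sketch: the weights.rs churn in this patch is mechanical. `Weight` stops
// being a bare integer, so literals are wrapped in `Weight::from_ref_time(..)`
// and per-component multiplication goes through `scalar_saturating_mul`. A toy
// mirror of that shape (not the real frame_support type; DB components
// omitted) showing the old and new formulas compute the same number:

type RefTimeWeight = u64;

#[derive(Clone, Copy)]
struct Weight {
    ref_time: RefTimeWeight,
}

impl Weight {
    const fn from_ref_time(ref_time: RefTimeWeight) -> Self {
        Self { ref_time }
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) }
    }
    fn scalar_saturating_mul(self, rhs: RefTimeWeight) -> Self {
        Self { ref_time: self.ref_time.saturating_mul(rhs) }
    }
}

// New-style `add_registrar` formula from the hunk above: base + 241_000 * r.
fn add_registrar_weight(r: RefTimeWeight) -> Weight {
    Weight::from_ref_time(16_649_000)
        .saturating_add(Weight::from_ref_time(241_000).scalar_saturating_mul(r))
}

fn main() {
    let r = 19;
    // The old style computed the same value directly on integers.
    assert_eq!(add_registrar_weight(r).ref_time, 16_649_000 + 241_000 * r);
}
// ----------------------------------------------------------------------------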
fn set_subs_new(s: u32, ) -> Weight { - (30_012_000 as Weight) + Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - (29_623_000 as Weight) + Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -119,81 +119,81 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (34_370_000 as Weight) + Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn request_judgement(r: u32, x: u32, ) -> Weight { - (34_759_000 as Weight) + Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - (32_254_000 as Weight) + Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - (7_858_000 as Weight) + Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - (8_011_000 as Weight) + Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn set_fields(r: u32, ) -> Weight { - (7_970_000 as Weight) + Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - (24_730_000 as Weight) + Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -203,58 +203,58 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (44_988_000 as Weight) + Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. 
fn add_sub(s: u32, ) -> Weight { - (36_768_000 as Weight) + Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - (13_474_000 as Weight) + Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - (37_720_000 as Weight) + Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn quit_sub(s: u32, ) -> Weight { - (26_848_000 as Weight) + Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -263,48 +263,48 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - (16_649_000 as Weight) + Weight::from_ref_time(16_649_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((241_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn set_identity(r: u32, x: u32, ) -> Weight { - (31_322_000 as Weight) + Weight::from_ref_time(31_322_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((312_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - (30_012_000 as Weight) + Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - (29_623_000 as Weight) + Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -313,81 +313,81 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (34_370_000 as Weight) + Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - (34_759_000 as Weight) + Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - (32_254_000 as Weight) + Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn set_fee(r: u32, ) -> Weight { - (7_858_000 as Weight) + Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - (8_011_000 as Weight) + Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - (7_970_000 as Weight) + Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - (24_730_000 as Weight) + Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -397,57 +397,57 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (44_988_000 as Weight) + Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - (36_768_000 as Weight) + Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - (13_474_000 as Weight) + Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - (37_720_000 as Weight) + Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. 
fn quit_sub(s: u32, ) -> Weight { - (26_848_000 as Weight) + Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index f190f6672f309..34c1c70d79f75 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -958,12 +958,12 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). - if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational(3 * threshold, validator_set_count); + if let Some(threshold) = offenders.checked_sub(self.validator_set_count / 10 + 1) { + let x = Perbill::from_rational(3 * threshold, self.validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 2459f7e748941..b734bd37b6fd4 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -123,7 +123,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Runtime { diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 288081556a085..05e1af169dba9 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -36,22 +36,24 @@ use sp_runtime::{ #[test] fn test_unresponsiveness_slash_fraction() { + let dummy_offence = + UnresponsivenessOffence { session_index: 0, validator_set_count: 50, offenders: vec![()] }; // A single case of unresponsiveness is not slashed. - assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero()); + assert_eq!(dummy_offence.slash_fraction(1), Perbill::zero()); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(5, 50), + dummy_offence.slash_fraction(5), Perbill::zero(), // 0% ); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(7, 50), + dummy_offence.slash_fraction(7), Perbill::from_parts(4200000), // 0.42% ); // One third offline should be punished around 5%. 
 	assert_eq!(
-		UnresponsivenessOffence::<()>::slash_fraction(17, 50),
+		dummy_offence.slash_fraction(17),
 		Perbill::from_parts(46200000), // 4.62%
 	);
 }
diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs
index 34762e66ec301..09fbc55854288 100644
--- a/frame/im-online/src/weights.rs
+++ b/frame/im-online/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 /// Weight functions needed for pallet_im_online.
@@ -56,13 +56,13 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: ImOnline AuthoredBlocks (r:1 w:0)
 	// Storage: ImOnline Keys (r:1 w:0)
 	fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight {
-		(79_225_000 as Weight)
+		Weight::from_ref_time(79_225_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((41_000 as Weight).saturating_mul(k as Weight))
+			.saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((293_000 as Weight).saturating_mul(e as Weight))
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 }
@@ -74,12 +74,12 @@ impl WeightInfo for () {
 	// Storage: ImOnline AuthoredBlocks (r:1 w:0)
 	// Storage: ImOnline Keys (r:1 w:0)
 	fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight {
-		(79_225_000 as Weight)
+		Weight::from_ref_time(79_225_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((41_000 as Weight).saturating_mul(k as Weight))
+			.saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((293_000 as Weight).saturating_mul(e as Weight))
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 }
diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs
index cb06cd809f542..f462f22284d40 100644
--- a/frame/indices/src/benchmarking.rs
+++ b/frame/indices/src/benchmarking.rs
@@ -44,10 +44,11 @@ benchmarks! {
 		let caller: T::AccountId = whitelisted_caller();
 		T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 		let recipient: T::AccountId = account("recipient", 0, SEED);
+		let recipient_lookup = T::Lookup::unlookup(recipient.clone());
 		T::Currency::make_free_balance_be(&recipient, BalanceOf::<T>::max_value());
 		// Claim the index
 		Indices::<T>::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?;
-	}: _(RawOrigin::Signed(caller.clone()), recipient.clone(), account_index)
+	}: _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index)
 	verify {
 		assert_eq!(Accounts::<T>::get(account_index).unwrap().0, recipient);
 	}
@@ -70,10 +71,11 @@ benchmarks!
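// ----------------------------------------------------------------------------
// Sketch: the im-online change above only moves `validator_set_count` into
// `&self`; the slashing curve itself is untouched:
//   min((3 * (k - (n / 10 + 1))) / n, 1) * 7%
// A self-contained replay of the test vectors, in parts-per-billion (the
// resolution of `Perbill`):

fn slash_parts_per_billion(offenders: u32, validator_set_count: u32) -> u64 {
    // The first 10% of the set (plus one) may be offline with no slash.
    match offenders.checked_sub(validator_set_count / 10 + 1) {
        Some(threshold) => {
            // x = 3 * threshold / n, capped at 1 (as Perbill::from_rational saturates)...
            let x = (3 * threshold as u64 * 1_000_000_000 / validator_set_count as u64)
                .min(1_000_000_000);
            // ...then scaled by 7%.
            x * 7 / 100
        }
        None => 0,
    }
}

fn main() {
    // Matches the asserts that now go through `dummy_offence.slash_fraction(..)`:
    assert_eq!(slash_parts_per_billion(1, 50), 0);
    assert_eq!(slash_parts_per_billion(5, 50), 0);
    assert_eq!(slash_parts_per_billion(7, 50), 4_200_000); // 0.42%
    assert_eq!(slash_parts_per_billion(17, 50), 46_200_000); // 4.62%
}
// ----------------------------------------------------------------------------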
{
 		let original: T::AccountId = account("original", 0, SEED);
 		T::Currency::make_free_balance_be(&original, BalanceOf::<T>::max_value());
 		let recipient: T::AccountId = account("recipient", 0, SEED);
+		let recipient_lookup = T::Lookup::unlookup(recipient.clone());
 		T::Currency::make_free_balance_be(&recipient, BalanceOf::<T>::max_value());
 		// Claim the index
 		Indices::<T>::claim(RawOrigin::Signed(original).into(), account_index)?;
-	}: _(RawOrigin::Root, recipient.clone(), account_index, false)
+	}: _(RawOrigin::Root, recipient_lookup, account_index, false)
 	verify {
 		assert_eq!(Accounts::<T>::get(account_index).unwrap().0, recipient);
 	}
diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs
index ddc03c94b1233..49380f18e24db 100644
--- a/frame/indices/src/lib.rs
+++ b/frame/indices/src/lib.rs
@@ -36,6 +36,7 @@ pub use weights::WeightInfo;
 type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 pub use pallet::*;
@@ -133,10 +134,11 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::transfer())]
 		pub fn transfer(
 			origin: OriginFor<T>,
-			new: T::AccountId,
+			new: AccountIdLookupOf<T>,
 			index: T::AccountIndex,
 		) -> DispatchResult {
 			let who = ensure_signed(origin)?;
+			let new = T::Lookup::lookup(new)?;
 			ensure!(who != new, Error::<T>::NotTransfer);
 			Accounts::<T>::try_mutate(index, |maybe_value| -> DispatchResult {
@@ -208,11 +210,12 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::force_transfer())]
 		pub fn force_transfer(
 			origin: OriginFor<T>,
-			new: T::AccountId,
+			new: AccountIdLookupOf<T>,
 			index: T::AccountIndex,
 			freeze: bool,
 		) -> DispatchResult {
 			ensure_root(origin)?;
+			let new = T::Lookup::lookup(new)?;
 			Accounts::<T>::mutate(index, |maybe_value| {
 				if let Some((account, amount, _)) = maybe_value.take() {
diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs
index 6bd79708c3dd2..693296a3b1064 100644
--- a/frame/indices/src/mock.rs
+++ b/frame/indices/src/mock.rs
@@ -44,7 +44,7 @@ frame_support::construct_runtime!(
 parameter_types!
{
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1024);
+		frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs
index 73d591c38bb2f..4e6c59703ca36 100644
--- a/frame/indices/src/tests.rs
+++ b/frame/indices/src/tests.rs
@@ -22,6 +22,7 @@ use super::{mock::*, *};
 use frame_support::{assert_noop, assert_ok};
 use pallet_balances::Error as BalancesError;
+use sp_runtime::MultiAddress::Id;
 #[test]
 fn claiming_should_work() {
@@ -60,7 +61,7 @@ fn freezing_should_work() {
 		assert_noop!(Indices::freeze(Some(1).into(), 0), Error::<Test>::Permanent);
 		assert_noop!(Indices::free(Some(1).into(), 0), Error::<Test>::Permanent);
-		assert_noop!(Indices::transfer(Some(1).into(), 2, 0), Error::<Test>::Permanent);
+		assert_noop!(Indices::transfer(Some(1).into(), Id(2), 0), Error::<Test>::Permanent);
 	});
 }
@@ -90,9 +91,9 @@ fn reclaim_index_on_accounts_should_work() {
 fn transfer_index_on_accounts_should_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Indices::claim(Some(1).into(), 0));
-		assert_noop!(Indices::transfer(Some(1).into(), 2, 1), Error::<Test>::NotAssigned);
-		assert_noop!(Indices::transfer(Some(2).into(), 3, 0), Error::<Test>::NotOwner);
-		assert_ok!(Indices::transfer(Some(1).into(), 3, 0));
+		assert_noop!(Indices::transfer(Some(1).into(), Id(2), 1), Error::<Test>::NotAssigned);
+		assert_noop!(Indices::transfer(Some(2).into(), Id(3), 0), Error::<Test>::NotOwner);
+		assert_ok!(Indices::transfer(Some(1).into(), Id(3), 0));
 		assert_eq!(Balances::reserved_balance(1), 0);
 		assert_eq!(Balances::reserved_balance(3), 1);
 		assert_eq!(Indices::lookup_index(0), Some(3));
@@ -103,7 +104,7 @@ fn transfer_index_on_accounts_should_work() {
 fn force_transfer_index_on_preowned_should_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Indices::claim(Some(1).into(), 0));
-		assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false));
+		assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false));
 		assert_eq!(Balances::reserved_balance(1), 0);
 		assert_eq!(Balances::reserved_balance(3), 0);
 		assert_eq!(Indices::lookup_index(0), Some(3));
@@ -113,7 +114,7 @@ fn force_transfer_index_on_preowned_should_work() {
 #[test]
 fn force_transfer_index_on_free_should_work() {
 	new_test_ext().execute_with(|| {
-		assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false));
+		assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false));
 		assert_eq!(Balances::reserved_balance(3), 0);
 		assert_eq!(Indices::lookup_index(0), Some(3));
 	});
diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs
index 6635d45272048..2475c86fd499b 100644
--- a/frame/indices/src/weights.rs
+++ b/frame/indices/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 /// Weight functions needed for pallet_indices.
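// ----------------------------------------------------------------------------
// Sketch: every weights.rs in this patch follows the same template -- a
// `WeightInfo` trait, a benchmark-generated `SubstrateWeight<T>` impl that
// charges the runtime-configured `T::DbWeight` for storage I/O, and an
// `impl WeightInfo for ()` fallback (hard-coded RocksDB constants) so mocks
// can write `type WeightInfo = ();`. Toy model; the numbers are illustrative.

type Weight = u64; // stand-in for frame_support's Weight

trait WeightInfo {
    fn claim() -> Weight;
}

// Stand-in for `SubstrateWeight<T>(PhantomData<T>)`; here the "configured"
// DB costs are just constants.
struct SubstrateWeight;

impl WeightInfo for SubstrateWeight {
    fn claim() -> Weight {
        const DB_READ: Weight = 25_000_000; // stand-in for T::DbWeight reads
        const DB_WRITE: Weight = 100_000_000; // stand-in for T::DbWeight writes
        // base + (r:1 w:1), mirroring `claim()` in the hunk below.
        25_929_000 + DB_READ + DB_WRITE
    }
}

// The `()` impl exists so test runtimes need no weight configuration at all.
impl WeightInfo for () {
    fn claim() -> Weight {
        25_929_000 + 25_000_000 + 100_000_000 // same formula, fixed constants
    }
}

fn main() {
    // With identical constants the two impls agree by construction.
    assert_eq!(SubstrateWeight::claim(), <() as WeightInfo>::claim());
}
// ----------------------------------------------------------------------------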
@@ -56,35 +56,35 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - (25_929_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_929_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (32_627_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_627_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - (26_804_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_804_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (27_390_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_390_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - (30_973_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(30_973_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -92,34 +92,34 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - (25_929_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_929_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (32_627_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_627_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - (26_804_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_804_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (27_390_000 as Weight) 
- .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_390_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - (30_973_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(30_973_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 193958cfd41aa..f646ca02a0377 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_lottery. @@ -63,30 +63,30 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - (44_706_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(44_706_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Lottery CallIndices (r:0 w:1) fn set_calls(n: u32, ) -> Weight { - (12_556_000 as Weight) + Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - (38_051_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(38_051_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - (6_910_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_910_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - (53_732_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(53_732_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as 
RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -105,9 +105,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - (55_868_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_868_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -121,30 +121,30 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - (44_706_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(44_706_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Lottery CallIndices (r:0 w:1) fn set_calls(n: u32, ) -> Weight { - (12_556_000 as Weight) + Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - (38_051_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(38_051_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - (6_910_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_910_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -152,9 +152,9 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - (53_732_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(53_732_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -163,8 +163,8 @@ impl WeightInfo for () { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - (55_868_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + 
Weight::from_ref_time(55_868_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight))
 	}
 }
diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs
index 24ecfd5333c66..b4c9c3b38f1da 100644
--- a/frame/membership/src/lib.rs
+++ b/frame/membership/src/lib.rs
@@ -27,6 +27,7 @@ use frame_support::{
 	traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers},
 	BoundedVec,
 };
+use sp_runtime::traits::StaticLookup;
 use sp_std::prelude::*;
 pub mod migrations;
@@ -35,6 +36,8 @@ pub mod weights;
 pub use pallet::*;
 pub use weights::WeightInfo;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -163,8 +166,9 @@ pub mod pallet {
 		///
 		/// May only be called from `T::AddOrigin`.
 		#[pallet::weight(50_000_000)]
-		pub fn add_member(origin: OriginFor<T>, who: T::AccountId) -> DispatchResult {
+		pub fn add_member(origin: OriginFor<T>, who: AccountIdLookupOf<T>) -> DispatchResult {
 			T::AddOrigin::ensure_origin(origin)?;
+			let who = T::Lookup::lookup(who)?;
 			let mut members = <Members<T, I>>::get();
 			let location = members.binary_search(&who).err().ok_or(Error::<T, I>::AlreadyMember)?;
@@ -184,8 +188,9 @@ pub mod pallet {
 		///
 		/// May only be called from `T::RemoveOrigin`.
 		#[pallet::weight(50_000_000)]
-		pub fn remove_member(origin: OriginFor<T>, who: T::AccountId) -> DispatchResult {
+		pub fn remove_member(origin: OriginFor<T>, who: AccountIdLookupOf<T>) -> DispatchResult {
 			T::RemoveOrigin::ensure_origin(origin)?;
+			let who = T::Lookup::lookup(who)?;
 			let mut members = <Members<T, I>>::get();
 			let location = members.binary_search(&who).ok().ok_or(Error::<T, I>::NotMember)?;
@@ -208,10 +213,12 @@ pub mod pallet {
 		#[pallet::weight(50_000_000)]
 		pub fn swap_member(
 			origin: OriginFor<T>,
-			remove: T::AccountId,
-			add: T::AccountId,
+			remove: AccountIdLookupOf<T>,
+			add: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			T::SwapOrigin::ensure_origin(origin)?;
+			let remove = T::Lookup::lookup(remove)?;
+			let add = T::Lookup::lookup(add)?;
 			if remove == add {
 				return Ok(())
@@ -259,8 +266,9 @@ pub mod pallet {
 		///
 		/// Prime membership is passed from the origin account to `new`, if extant.
 		#[pallet::weight(50_000_000)]
-		pub fn change_key(origin: OriginFor<T>, new: T::AccountId) -> DispatchResult {
+		pub fn change_key(origin: OriginFor<T>, new: AccountIdLookupOf<T>) -> DispatchResult {
 			let remove = ensure_signed(origin)?;
+			let new = T::Lookup::lookup(new)?;
 			if remove != new {
 				let mut members = <Members<T, I>>::get();
@@ -292,8 +300,9 @@ pub mod pallet {
 		///
 		/// May only be called from `T::PrimeOrigin`.
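// ----------------------------------------------------------------------------
// Sketch: the membership dispatchables above keep the member list sorted and
// lean on `binary_search` for everything: `Err(pos)` proves absence *and*
// yields the insertion point, `Ok(pos)` proves presence *and* yields the
// removal index. Self-contained; the real pallet uses a BoundedVec with
// `try_insert`, simplified here to a plain Vec.

#[derive(Debug, PartialEq)]
enum Error {
    AlreadyMember,
    NotMember,
}

fn add_member(members: &mut Vec<u64>, who: u64) -> Result<(), Error> {
    // `.err()` keeps only the not-found case; its payload is the slot to fill.
    let location = members.binary_search(&who).err().ok_or(Error::AlreadyMember)?;
    members.insert(location, who);
    Ok(())
}

fn remove_member(members: &mut Vec<u64>, who: u64) -> Result<(), Error> {
    // `.ok()` keeps only the found case; its payload is the index to remove.
    let location = members.binary_search(&who).ok().ok_or(Error::NotMember)?;
    members.remove(location);
    Ok(())
}

fn main() {
    let mut members = vec![1, 3, 5];
    assert_eq!(add_member(&mut members, 4), Ok(()));
    assert_eq!(members, vec![1, 3, 4, 5]); // still sorted
    assert_eq!(add_member(&mut members, 4), Err(Error::AlreadyMember));
    assert_eq!(remove_member(&mut members, 3), Ok(()));
    assert_eq!(remove_member(&mut members, 3), Err(Error::NotMember));
}
// ----------------------------------------------------------------------------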
 		#[pallet::weight(50_000_000)]
-		pub fn set_prime(origin: OriginFor<T>, who: T::AccountId) -> DispatchResult {
+		pub fn set_prime(origin: OriginFor<T>, who: AccountIdLookupOf<T>) -> DispatchResult {
 			T::PrimeOrigin::ensure_origin(origin)?;
+			let who = T::Lookup::lookup(who)?;
 			Self::members().binary_search(&who).ok().ok_or(Error::<T, I>::NotMember)?;
 			Prime::<T, I>::put(&who);
 			T::MembershipChanged::set_prime(Some(who));
@@ -355,7 +364,8 @@ mod benchmark {
 		assert_ok!(<Membership<T, I>>::reset_members(reset_origin, members.clone()));
 		if let Some(prime) = prime.map(|i| members[i].clone()) {
-			assert_ok!(<Membership<T, I>>::set_prime(prime_origin, prime));
+			let prime_lookup = T::Lookup::unlookup(prime);
+			assert_ok!(<Membership<T, I>>::set_prime(prime_origin, prime_lookup));
 		} else {
 			assert_ok!(<Membership<T, I>>::clear_prime(prime_origin));
 		}
@@ -368,8 +378,9 @@ mod benchmark {
 		let members = (0..m).map(|i| account("member", i, SEED)).collect::<Vec<T::AccountId>>();
 		set_members::<T, I>(members, None);
 		let new_member = account::<T::AccountId>("add", m, SEED);
+		let new_member_lookup = T::Lookup::unlookup(new_member.clone());
 	}: {
-		assert_ok!(<Membership<T, I>>::add_member(T::AddOrigin::successful_origin(), new_member.clone()));
+		assert_ok!(<Membership<T, I>>::add_member(T::AddOrigin::successful_origin(), new_member_lookup));
 	} verify {
 		assert!(<Members<T, I>>::get().contains(&new_member));
@@ -385,8 +396,9 @@ mod benchmark {
 		set_members::<T, I>(members.clone(), Some(members.len() - 1));
 		let to_remove = members.first().cloned().unwrap();
+		let to_remove_lookup = T::Lookup::unlookup(to_remove.clone());
 	}: {
-		assert_ok!(<Membership<T, I>>::remove_member(T::RemoveOrigin::successful_origin(), to_remove.clone()));
+		assert_ok!(<Membership<T, I>>::remove_member(T::RemoveOrigin::successful_origin(), to_remove_lookup));
 	} verify {
 		assert!(!<Members<T, I>>::get().contains(&to_remove));
 		// prime is rejigged
@@ -401,12 +413,14 @@ mod benchmark {
 		let members = (0..m).map(|i| account("member", i, SEED)).collect::<Vec<T::AccountId>>();
 		set_members::<T, I>(members.clone(), Some(members.len() - 1));
 		let add = account::<T::AccountId>("member", m, SEED);
+		let add_lookup = T::Lookup::unlookup(add.clone());
 		let remove = members.first().cloned().unwrap();
+		let remove_lookup = T::Lookup::unlookup(remove.clone());
 	}: {
 		assert_ok!(<Membership<T, I>>::swap_member(
 			T::SwapOrigin::successful_origin(),
-			remove.clone(),
-			add.clone(),
+			remove_lookup,
+			add_lookup,
 		));
 	} verify {
 		assert!(!<Members<T, I>>::get().contains(&remove));
@@ -442,9 +456,10 @@ mod benchmark {
 		set_members::<T, I>(members.clone(), Some(members.len() - 1));
 		let add = account::<T::AccountId>("member", m, SEED);
+		let add_lookup = T::Lookup::unlookup(add.clone());
 		whitelist!(prime);
 	}: {
-		assert_ok!(<Membership<T, I>>::change_key(RawOrigin::Signed(prime.clone()).into(), add.clone()));
+		assert_ok!(<Membership<T, I>>::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup));
 	} verify {
 		assert!(!<Members<T, I>>::get().contains(&prime));
 		assert!(<Members<T, I>>::get().contains(&add));
@@ -457,9 +472,10 @@ mod benchmark {
 		let m in 1 .. T::MaxMembers::get();
 		let members = (0..m).map(|i| account("member", i, SEED)).collect::<Vec<T::AccountId>>();
 		let prime = members.last().cloned().unwrap();
+		let prime_lookup = T::Lookup::unlookup(prime.clone());
 		set_members::<T, I>(members, None);
 	}: {
-		assert_ok!(<Membership<T, I>>::set_prime(T::PrimeOrigin::successful_origin(), prime));
+		assert_ok!(<Membership<T, I>>::set_prime(T::PrimeOrigin::successful_origin(), prime_lookup));
 	} verify {
 		assert!(<Prime<T, I>>::get().is_some());
 		assert!(<T::MembershipChanged>::get_prime().is_some());
@@ -516,7 +532,7 @@ mod tests {
 parameter_types!
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static Members: Vec = vec![]; pub static Prime: Option = None; } diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index b3b52751d9598..5b8735aa2bac9 100644 --- a/frame/membership/src/migrations/v4.rs +++ b/frame/membership/src/migrations/v4.rs @@ -46,7 +46,7 @@ pub fn migrate::on_chain_storage_version(); @@ -71,7 +71,7 @@ pub fn migrate WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - (15_318_000 as Weight) + Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -73,11 +73,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - (18_005_000 as Weight) + Weight::from_ref_time(18_005_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -85,11 +85,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn swap_member(m: u32, ) -> Weight { - (18_029_000 as Weight) + Weight::from_ref_time(18_029_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -97,11 +97,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn reset_member(m: u32, ) -> Weight { - (18_105_000 as Weight) + Weight::from_ref_time(18_105_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((158_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as 
RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -109,29 +109,29 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn change_key(m: u32, ) -> Weight { - (18_852_000 as Weight) + Weight::from_ref_time(18_852_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn set_prime(m: u32, ) -> Weight { - (4_869_000 as Weight) + Weight::from_ref_time(4_869_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((28_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn clear_prime(m: u32, ) -> Weight { - (1_593_000 as Weight) + Weight::from_ref_time(1_593_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -142,11 +142,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - (15_318_000 as Weight) + Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -154,11 +154,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - (18_005_000 as Weight) + Weight::from_ref_time(18_005_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(45_000 as 
RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -166,11 +166,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn swap_member(m: u32, ) -> Weight { - (18_029_000 as Weight) + Weight::from_ref_time(18_029_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -178,11 +178,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn reset_member(m: u32, ) -> Weight { - (18_105_000 as Weight) + Weight::from_ref_time(18_105_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((158_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -190,28 +190,28 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn change_key(m: u32, ) -> Weight { - (18_852_000 as Weight) + Weight::from_ref_time(18_852_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn set_prime(m: u32, ) -> Weight { - (4_869_000 as Weight) + Weight::from_ref_time(4_869_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((28_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn clear_prime(m: u32, ) -> Weight { - (1_593_000 as Weight) + Weight::from_ref_time(1_593_000 as RefTimeWeight) // Standard Error: 0 - 
.saturating_add((2_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index f5f8bdee0855d..75301afed0094 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -27,6 +27,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] env_logger = "0.9" hex-literal = "0.3" +itertools = "0.10.3" [features] default = ["std"] diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 45cb975df277b..3da6678a39bf2 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } @@ -23,4 +23,4 @@ sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle- sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs index 73d1963a42964..e7a4b6ab31c4a 100644 --- a/frame/merkle-mountain-range/src/default_weights.rs +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -30,7 +30,7 @@ impl crate::WeightInfo for () { // Blake2 hash cost. let hash_weight = 2 * WEIGHT_PER_NANOS; // No-op hook. - let hook_weight = 0; + let hook_weight = Weight::zero(); leaf_weight .saturating_add(hash_weight) diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index d6cf3240692fc..4644ebcb7da1c 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -58,7 +58,10 @@ use codec::Encode; use frame_support::weights::Weight; -use sp_runtime::traits::{self, One, Saturating}; +use sp_runtime::{ + traits::{self, One, Saturating}, + SaturatedConversion, +}; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -116,12 +119,12 @@ pub mod pallet { /// Prefix for elements stored in the Off-chain DB via Indexing API. /// /// Each node of the MMR is inserted both on-chain and off-chain via Indexing API. - /// The former does not store full leaf content, just it's compact version (hash), + /// The former does not store full leaf content, just its compact version (hash), /// and some of the inner mmr nodes might be pruned from on-chain storage. /// The latter will contain all the entries in their full form. /// /// Each node is stored in the Off-chain DB under key derived from the - /// [`Self::INDEXING_PREFIX`] and it's in-tree index (MMR position). + /// [`Self::INDEXING_PREFIX`] and its in-tree index (MMR position). const INDEXING_PREFIX: &'static [u8]; /// A hasher type for MMR. 
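All of the `weights.rs` hunks above follow one mechanical recipe: `Weight` is no longer a bare `u64`, so constants get wrapped in `Weight::from_ref_time` and scalar multiplication goes through `scalar_saturating_mul`. A hedged before/after sketch using the `add_member` numbers from above:

use frame_support::weights::{constants::RocksDbWeight, Weight};

// Old shape (Weight was a u64 alias):
//     (15_318_000 as Weight)
//         .saturating_add((51_000 as Weight).saturating_mul(m as Weight))
// New shape (Weight is a struct; RefTimeWeight is a u64 alias):
fn add_member_weight(m: u64) -> Weight {
    Weight::from_ref_time(15_318_000)
        .saturating_add(Weight::from_ref_time(51_000).scalar_saturating_mul(m))
        .saturating_add(RocksDbWeight::get().reads(2))
        .saturating_add(RocksDbWeight::get().writes(3))
}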
@@ -162,6 +165,12 @@ pub mod pallet {
 	///
 	/// Note that the leaf at each block MUST be unique. You may want to include a block hash or
 	/// block number as the easiest way to ensure that.
+	/// Also note that the leaf added by each block is expected to only reference data coming
+	/// from ancestor blocks (leaves are saved offchain under a `(parent_hash, pos)` key to be
+	/// fork-resistant; such conflicts can only happen on 1-block deep forks, which means
+	/// two forks with an identical line of ancestors compete to write the same offchain key, but
+	/// that's fine as long as leaves only contain data coming from ancestors - conflicting
+	/// writes are identical).
 	type LeafData: primitives::LeafDataProvider;

 	/// A hook to act on the new MMR root.
@@ -215,8 +224,31 @@ pub mod pallet {
 			<RootHash<T, I>>::put(root);

 			let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks();
+			T::WeightInfo::on_initialize(peaks_before.max(peaks_after))
 		}
+
+		fn offchain_worker(n: T::BlockNumber) {
+			use mmr::storage::{OffchainStorage, Storage};
+			// The MMR pallet uses offchain storage to hold the full MMR and leaves.
+			// The leaves are saved under fork-unique keys `(parent_hash, pos)`.
+			// The MMR runtime depends on `frame_system::block_hash(block_num)` mappings to find
+			// parent hashes for particular nodes or leaves.
+			// This MMR offchain worker function moves a rolling window of the same size
+			// as the `frame_system::block_hash` map, where nodes/leaves added by blocks that are
+			// just about to exit the window are "canonicalized" so that their offchain key no
+			// longer depends on `parent_hash`, and therefore on access to `frame_system::block_hash`.
+			//
+			// This approach works to eliminate fork-induced leaf collisions in offchain db,
+			// under the assumption that no fork will be deeper than `frame_system::BlockHashCount`
+			// blocks (2400 blocks on Polkadot, Kusama, Rococo, etc):
+			// entries pertaining to block `N` where `N < current-2400` are moved to a key based
+			// solely on block number. The only way to have collisions is if two competing forks
+			// are deeper than 2400 blocks and they both "canonicalize" their view of block `N`.
+			// Once a block is canonicalized, all MMR entries pertaining to sibling blocks from
+			// other forks are pruned from offchain db.
+			Storage::<OffchainStorage, T, I, LeafOf<T, I>>::canonicalize_and_prune(n);
+		}
 	}
 }
@@ -254,9 +286,38 @@ where
 }

 impl<T: Config<I>, I: 'static> Pallet<T, I> {
-	fn offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec<u8> {
+	/// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR.
+	///
+	/// This combination makes the offchain (key,value) entry resilient to chain forks.
+	fn node_offchain_key(
+		parent_hash: <T as frame_system::Config>::Hash,
+		pos: NodeIndex,
+	) -> sp_std::prelude::Vec<u8> {
+		(T::INDEXING_PREFIX, parent_hash, pos).encode()
+	}
+
+	/// Build canonical offchain key for node `pos` in MMR.
+	///
+	/// Used for nodes added by now finalized blocks.
+	fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec<u8> {
 		(T::INDEXING_PREFIX, pos).encode()
 	}
+
+	/// Provide the parent number for the block that added `leaf_index` to the MMR.
+ fn leaf_index_to_parent_block_num( + leaf_index: LeafIndex, + leaves_count: LeafIndex, + ) -> ::BlockNumber { + // leaves are zero-indexed and were added one per block since pallet activation, + // while block numbers are one-indexed, so block number that added `leaf_idx` is: + // `block_num = block_num_when_pallet_activated + leaf_idx + 1` + // `block_num = (current_block_num - leaves_count) + leaf_idx + 1` + // `parent_block_num = current_block_num - leaves_count + leaf_idx`. + >::block_number() + .saturating_sub(leaves_count.saturated_into()) + .saturating_add(leaf_index.saturated_into()) + } + /// Generate a MMR proof for the given `leaf_indices`. /// /// Note this method can only be used from an off-chain context @@ -264,7 +325,7 @@ impl, I: 'static> Pallet { /// all the leaves to be present. /// It may return an error or panic if used incorrectly. pub fn generate_batch_proof( - leaf_indices: Vec, + leaf_indices: Vec, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, @@ -290,7 +351,7 @@ impl, I: 'static> Pallet { ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || - proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() + (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count { return Err(primitives::Error::Verify .log_debug("The proof has incorrect number of leaves or proof items.")) diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 535057ca80da7..8b623edf56957 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -18,8 +18,11 @@ //! A MMR storage implementations. use codec::Encode; +use frame_support::traits::Get; use mmr_lib::helper; -use sp_io::offchain_index; +use sp_core::offchain::StorageKind; +use sp_io::{offchain, offchain_index}; +use sp_runtime::traits::UniqueSaturatedInto; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] use sp_std::prelude::*; @@ -46,6 +49,51 @@ pub struct RuntimeStorage; /// DOES NOT support adding new items to the MMR. pub struct OffchainStorage; +/// Suffix of key for the 'pruning_map'. +/// +/// Nodes and leaves are initially saved under fork-specific keys in offchain db, +/// eventually they are "canonicalized" and this map is used to prune non-canon entries. +const OFFCHAIN_PRUNING_MAP_KEY_SUFFIX: &str = "pruning_map"; + +/// Used to store offchain mappings of `BlockNumber -> Vec[Hash]` to track all forks. +/// Size of this offchain map is at most `frame_system::BlockHashCount`, its entries are pruned +/// as part of the mechanism that prunes the forks this map tracks. +pub(crate) struct PruningMap(sp_std::marker::PhantomData<(T, I)>); +impl PruningMap +where + T: Config, + I: 'static, +{ + pub(crate) fn pruning_map_offchain_key(block: T::BlockNumber) -> sp_std::prelude::Vec { + (T::INDEXING_PREFIX, block, OFFCHAIN_PRUNING_MAP_KEY_SUFFIX).encode() + } + + /// Append `hash` to the list of parent hashes for `block` in offchain db. 
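The arithmetic in `leaf_index_to_parent_block_num` above is easy to misread, so here is a tiny standalone check of the derivation in its comments, assuming one leaf per block since pallet activation at genesis (hypothetical code, not part of the diff):

// Leaf 41 was added by block 42 (leaves are zero-indexed, blocks one-indexed),
// so its parent block is 41:
//     parent_block_num = current_block_num - leaves_count + leaf_index
fn parent_block_num(current: u64, leaves_count: u64, leaf_index: u64) -> u64 {
    current.saturating_sub(leaves_count).saturating_add(leaf_index)
}

fn main() {
    assert_eq!(parent_block_num(100, 100, 41), 41);
}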
+ pub fn append(block: T::BlockNumber, hash: ::Hash) { + let map_key = Self::pruning_map_offchain_key(block); + offchain::local_storage_get(StorageKind::PERSISTENT, &map_key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + .or_else(|| Some(Vec::<::Hash>::new())) + .map(|mut parents| { + parents.push(hash); + offchain::local_storage_set( + StorageKind::PERSISTENT, + &map_key, + &Encode::encode(&parents), + ); + }); + } + + /// Remove list of parent hashes for `block` from offchain db and return it. + pub fn remove(block: T::BlockNumber) -> Option::Hash>> { + let map_key = Self::pruning_map_offchain_key(block); + offchain::local_storage_get(StorageKind::PERSISTENT, &map_key).and_then(|v| { + offchain::local_storage_clear(StorageKind::PERSISTENT, &map_key); + codec::Decode::decode(&mut &*v).ok() + }) + } +} + /// A storage layer for MMR. /// /// There are two different implementations depending on the use case. @@ -58,6 +106,109 @@ impl Default for Storage { } } +impl Storage +where + T: Config, + I: 'static, + L: primitives::FullLeaf, +{ + /// Move nodes and leaves added by block `N` in offchain db from _fork-aware key_ to + /// _canonical key_, + /// where `N` is `frame_system::BlockHashCount` blocks behind current block number. + /// + /// This "canonicalization" process is required because the _fork-aware key_ value depends + /// on `frame_system::block_hash(block_num)` map which only holds the last + /// `frame_system::BlockHashCount` blocks. + /// + /// For the canonicalized block, prune all nodes pertaining to other forks from offchain db. + /// + /// Should only be called from offchain context, because it requires both read and write + /// access to offchain db. + pub(crate) fn canonicalize_and_prune(block: T::BlockNumber) { + // Add "block_num -> hash" mapping to offchain db, + // with all forks pushing hashes to same entry (same block number). + let parent_hash = >::parent_hash(); + PruningMap::::append(block, parent_hash); + + // Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves + // are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`. + let leaves = NumberOfLeaves::::get(); + let window_size = + ::BlockHashCount::get().unique_saturated_into(); + if leaves >= window_size { + // Move the rolling window towards the end of `block_num->hash` mappings available + // in the runtime: we "canonicalize" the leaf at the end, + let to_canon_leaf = leaves.saturating_sub(window_size); + // and all the nodes added by that leaf. + let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}", + to_canon_leaf, to_canon_nodes + ); + // For this block number there may be node entries saved from multiple forks. + let to_canon_block_num = + Pallet::::leaf_index_to_parent_block_num(to_canon_leaf, leaves); + // Only entries under this hash (retrieved from state on current canon fork) are to be + // persisted. All other entries added by same block number will be cleared. + let to_canon_hash = >::block_hash(to_canon_block_num); + + Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash); + // Get all the forks to prune, also remove them from the offchain pruning_map. 
+ PruningMap::::remove(to_canon_block_num) + .map(|forks| { + Self::prune_nodes_for_forks(&to_canon_nodes, forks); + }) + .unwrap_or_else(|| { + frame_support::log::error!( + target: "runtime::mmr::offchain", + "Offchain: could not prune: no entry in pruning map for block {:?}", + to_canon_block_num + ); + }) + } + } + + fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { + for hash in forks { + for pos in nodes { + let key = Pallet::::node_offchain_key(hash, *pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", + "Clear elem at pos {} with key {:?}", + pos, key + ); + offchain::local_storage_clear(StorageKind::PERSISTENT, &key); + } + } + } + + fn canonicalize_nodes_for_hash( + to_canon_nodes: &[NodeIndex], + to_canon_hash: ::Hash, + ) { + for pos in to_canon_nodes { + let key = Pallet::::node_offchain_key(to_canon_hash, *pos); + // Retrieve the element from Off-chain DB under fork-aware key. + if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + let canon_key = Pallet::::node_canon_offchain_key(*pos); + // Add under new canon key. + offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); + frame_support::log::debug!( + target: "runtime::mmr::offchain", + "Moved elem at pos {} from key {:?} to canon key {:?}", + pos, key, canon_key + ); + } else { + frame_support::log::error!( + target: "runtime::mmr::offchain", + "Could not canonicalize elem at pos {} using key {:?}", + pos, key + ); + } + } + } +} + impl mmr_lib::MMRStore> for Storage where T: Config, @@ -65,9 +216,49 @@ where L: primitives::FullLeaf + codec::Decode, { fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { - let key = Pallet::::offchain_key(pos); + let leaves = NumberOfLeaves::::get(); + // Find out which leaf added node `pos` in the MMR. + let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); + + let window_size = + ::BlockHashCount::get().unique_saturated_into(); + // Leaves older than this window should have been canonicalized. + if leaves.saturating_sub(ancestor_leaf_idx) > window_size { + let key = Pallet::::node_canon_offchain_key(pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}", + pos, ancestor_leaf_idx, key + ); + // Just for safety, to easily handle runtime upgrades where any of the window params + // change and maybe we mess up storage migration, + // return _if and only if_ node is found (in normal conditions it's always found), + if let Some(elem) = + sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + { + return Ok(codec::Decode::decode(&mut &*elem).ok()) + } + // BUT if we DID MESS UP, fall through to searching node using fork-specific key. + } + + // Leaves still within the window will be found in offchain db under fork-aware keys. + let ancestor_parent_block_num = + Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); + let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); + let key = Pallet::::node_offchain_key(ancestor_parent_hash, pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", + pos, ancestor_leaf_idx, ancestor_parent_hash, key + ); // Retrieve the element from Off-chain DB. Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + .or_else(|| { + // Again, this is just us being extra paranoid. 
+			// We get here only if we mess up a storage migration for a runtime upgrade where,
+			// say, the window is increased, and for a little while following the upgrade there
+			// are leaves inside the new 'window' that had already been canonicalized before the
+			// upgrade.
+			let key = Pallet::<T, I>::node_canon_offchain_key(pos);
+			sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
+		})
 		.and_then(|v| codec::Decode::decode(&mut &*v).ok()))
 	}
@@ -91,9 +282,11 @@ where
 			return Ok(())
 		}

-		sp_std::if_std! {
-			frame_support::log::trace!("elems: {:?}", elems.iter().map(|elem| elem.hash()).collect::<Vec<_>>());
-		}
+		frame_support::log::trace!(
+			target: "runtime::mmr",
+			"elems: {:?}",
+			elems.iter().map(|elem| elem.hash()).collect::<Vec<_>>()
+		);

 		let leaves = NumberOfLeaves::<T, I>::get();
 		let size = NodesUtils::new(leaves).size();
@@ -112,11 +305,24 @@ where
 		let mut leaf_index = leaves;
 		let mut node_index = size;

+		// Use the parent hash of the block adding new nodes (this block) as an extra identifier
+		// in offchain DB to avoid DB collisions and overwrites in case of forks.
+		let parent_hash = <frame_system::Pallet<T>>::parent_hash();
 		for elem in elems {
+			// For now we store this leaf offchain keyed by `(parent_hash, node_index)`
+			// to make it fork-resistant.
+			// The offchain worker task will "canonicalize" it `frame_system::BlockHashCount`
+			// blocks later, when we are not worried about forks anymore (highly unlikely to have
+			// a fork in the chain that deep).
+			// "Canonicalization" in this case means moving this leaf under a new key based
+			// only on the leaf's `node_index`.
+			let key = Pallet::<T, I>::node_offchain_key(parent_hash, node_index);
+			frame_support::log::debug!(
+				target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}",
+				node_index, parent_hash, key
+			);
 			// Indexing API is used to store the full node content (both leaf and inner).
-			elem.using_encoded(|elem| {
-				offchain_index::set(&Pallet::<T, I>::offchain_key(node_index), elem)
-			});
+			elem.using_encoded(|elem| offchain_index::set(&key, elem));

 			// On-chain we are going to only store new peaks.
 			if peaks_to_store.next_if_eq(&node_index).is_some() {
@@ -150,10 +356,8 @@ fn peaks_to_prune_and_store(
 	// both collections may share a common prefix.
 	let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) };
 	let peaks_after = helper::get_peaks(new_size);
-	sp_std::if_std! {
-		frame_support::log::trace!("peaks_before: {:?}", peaks_before);
-		frame_support::log::trace!("peaks_after: {:?}", peaks_after);
-	}
+	frame_support::log::trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before);
+	frame_support::log::trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after);

 	let mut peaks_before = peaks_before.into_iter().peekable();
 	let mut peaks_after = peaks_after.into_iter().peekable();
diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs
index d9f7e3b671be3..0b8e88a9283da 100644
--- a/frame/merkle-mountain-range/src/mmr/utils.rs
+++ b/frame/merkle-mountain-range/src/mmr/utils.rs
@@ -18,6 +18,7 @@
 //! Merkle Mountain Range utilities.

 use crate::primitives::{LeafIndex, NodeIndex};
+use mmr_lib::helper;

 /// MMR nodes & size-related utilities.
 pub struct NodesUtils {
@@ -45,33 +46,80 @@ impl NodesUtils {
 		2 * self.no_of_leaves - self.number_of_peaks()
 	}

-	/// Calculate maximal depth of the MMR.
-	pub fn depth(&self) -> u32 {
-		if self.no_of_leaves == 0 {
+	/// Calculate `LeafIndex` for the leaf that added `node_index` to the MMR.
+ pub fn leaf_index_that_added_node(node_index: NodeIndex) -> LeafIndex { + let rightmost_leaf_pos = Self::rightmost_leaf_node_index_from_pos(node_index); + Self::leaf_node_index_to_leaf_index(rightmost_leaf_pos) + } + + // Translate a _leaf_ `NodeIndex` to its `LeafIndex`. + fn leaf_node_index_to_leaf_index(pos: NodeIndex) -> LeafIndex { + if pos == 0 { return 0 } + let peaks = helper::get_peaks(pos); + (pos + peaks.len() as u64) >> 1 + } - 64 - self.no_of_leaves.next_power_of_two().leading_zeros() + // Starting from any node position get position of rightmost leaf; this is the leaf + // responsible for the addition of node `pos`. + fn rightmost_leaf_node_index_from_pos(pos: NodeIndex) -> NodeIndex { + pos - (helper::pos_height_in_tree(pos) as u64) + } + + /// Starting from any leaf index, get the sequence of positions of the nodes added + /// to the mmr when this leaf was added (inclusive of the leaf's position itself). + /// That is, all of these nodes are right children of their respective parents. + pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> crate::Vec { + let pos = helper::leaf_index_to_pos(leaf_index); + let num_parents = leaf_index.trailing_ones() as u64; + return (pos..=pos + num_parents).collect() } } #[cfg(test)] mod tests { use super::*; + use mmr_lib::helper::leaf_index_to_pos; #[test] - fn should_calculate_number_of_leaves_correctly() { - assert_eq!( - vec![0, 1, 2, 3, 4, 9, 15, 21] - .into_iter() - .map(|n| NodesUtils::new(n).depth()) - .collect::>(), - vec![0, 1, 2, 3, 3, 5, 5, 6] - ); + fn should_calculate_node_index_from_leaf_index() { + for index in 0..100000 { + let pos = leaf_index_to_pos(index); + assert_eq!(NodesUtils::leaf_node_index_to_leaf_index(pos), index); + } + } + + #[test] + fn should_calculate_right_branch_correctly() { + fn left_jump_sequence(leaf_index: LeafIndex) -> Vec { + let pos = leaf_index_to_pos(leaf_index); + let mut right_branch_ending_in_leaf = vec![pos]; + let mut next_pos = pos + 1; + while mmr_lib::helper::pos_height_in_tree(next_pos) > 0 { + right_branch_ending_in_leaf.push(next_pos); + next_pos += 1; + } + right_branch_ending_in_leaf + } + + for leaf_index in 0..100000 { + let pos = mmr_lib::helper::leaf_index_to_pos(leaf_index); + assert_eq!(NodesUtils::right_branch_ending_in_leaf(pos), left_jump_sequence(pos)); + } + } + + #[test] + fn should_calculate_rightmost_leaf_node_index_from_pos() { + for pos in 0..100000 { + let leaf_pos = NodesUtils::rightmost_leaf_node_index_from_pos(pos); + let leaf_index = NodesUtils::leaf_node_index_to_leaf_index(leaf_pos); + assert!(NodesUtils::right_branch_ending_in_leaf(leaf_index).contains(&pos)); + } } #[test] - fn should_calculate_depth_correclty() { + fn should_calculate_depth_correctly() { assert_eq!( vec![0, 1, 2, 3, 4, 9, 15, 21] .into_iter() diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index d025910a9ee5c..e13f89617bb9a 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -15,9 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
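The index math introduced in `utils.rs` above is compact enough to deserve a worked example: the leaf-index formula `(pos + num_peaks) >> 1` and the `trailing_ones` trick behind `right_branch_ending_in_leaf`. A hypothetical standalone check, with the peak counts computed by hand for the 4-leaf MMR whose seven nodes are numbered 0..=6 (leaves sit at positions 0, 1, 3 and 4):

// pos = 3: an MMR of size 3 has one peak  -> (3 + 1) >> 1 = 2 (the third leaf)
// pos = 4: an MMR of size 4 has two peaks -> (4 + 2) >> 1 = 3 (the fourth leaf)
fn leaf_index(pos: u64, num_peaks_of_size_pos: u64) -> u64 {
    (pos + num_peaks_of_size_pos) >> 1
}

// Appending leaf `i` performs one merge per trailing one-bit of `i`, so the
// block adding that leaf writes `1 + i.trailing_ones()` consecutive nodes.
fn nodes_added_by_leaf(leaf_index: u64) -> u64 {
    1 + leaf_index.trailing_ones() as u64
}

fn main() {
    assert_eq!(leaf_index(3, 1), 2);
    assert_eq!(leaf_index(4, 2), 3);
    assert_eq!(nodes_added_by_leaf(0), 1); // 0b0:   no merge
    assert_eq!(nodes_added_by_leaf(1), 2); // 0b1:   one merge
    assert_eq!(nodes_added_by_leaf(7), 4); // 0b111: three merges
}

This is also why `canonicalize_and_prune` can move a whole block's worth of nodes at once: they form a contiguous run of positions.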
-use crate::{mmr::utils, mock::*, *}; +use crate::{ + mmr::{storage::PruningMap, utils}, + mock::*, + *, +}; -use frame_support::traits::OnInitialize; +use frame_support::traits::{Get, OnInitialize}; use mmr_lib::helper; use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, @@ -35,7 +39,7 @@ fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { ext.register_extension(OffchainWorkerExt::new(offchain)); } -fn new_block() -> u64 { +fn new_block() -> Weight { let number = frame_system::Pallet::::block_number() + 1; let hash = H256::repeat_byte(number as u8); LEAF_DATA.with(|r| r.borrow_mut().a = number); @@ -47,7 +51,6 @@ fn new_block() -> u64 { fn peaks_from_leaves_count(leaves_count: NodeIndex) -> Vec { let size = utils::NodesUtils::new(leaves_count).size(); - helper::get_peaks(size) } @@ -73,7 +76,7 @@ fn decode_node( } } -fn init_chain(blocks: usize) { +fn add_blocks(blocks: usize) { // given for _ in 0..blocks { new_block(); @@ -107,7 +110,7 @@ fn should_start_empty() { crate::RootHash::::get(), hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0") ); - assert!(weight != 0); + assert!(weight != Weight::zero()); }); } @@ -115,9 +118,10 @@ fn should_start_empty() { fn should_append_to_mmr_when_on_initialize_is_called() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); - ext.execute_with(|| { + let (parent_b1, parent_b2) = ext.execute_with(|| { // when new_block(); + let parent_b1 = >::parent_hash(); // then assert_eq!(crate::NumberOfLeaves::::get(), 1); @@ -136,6 +140,7 @@ fn should_append_to_mmr_when_on_initialize_is_called() { // when new_block(); + let parent_b2 = >::parent_hash(); // then assert_eq!(crate::NumberOfLeaves::::get(), 2); @@ -157,26 +162,33 @@ fn should_append_to_mmr_when_on_initialize_is_called() { hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), ) ); - }); + (parent_b1, parent_b2) + }); // make sure the leaves end up in the offchain DB ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); assert_eq!( - offchain_db.get(&MMR::offchain_key(0)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b1, 0)).map(decode_node), Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),))) ); assert_eq!( - offchain_db.get(&MMR::offchain_key(1)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b2, 1)).map(decode_node), Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),))) ); assert_eq!( - offchain_db.get(&MMR::offchain_key(2)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b2, 2)).map(decode_node), Some(mmr::Node::Hash(hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" ))) ); - assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_b2, 3)), None); + + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } #[test] @@ -184,7 +196,7 @@ fn should_construct_larger_mmr_correctly() { let _ = env_logger::try_init(); new_test_ext().execute_with(|| { // when - init_chain(7); + add_blocks(7); // then assert_eq!(crate::NumberOfLeaves::::get(), 7); @@ -215,7 +227,7 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| 
init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proofs now. This requires the offchain extensions to be present @@ -283,7 +295,7 @@ fn should_generate_batch_proof_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proofs now. This requires the offchain extensions to be present @@ -316,7 +328,7 @@ fn should_verify() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -328,35 +340,71 @@ fn should_verify() { }); ext.execute_with(|| { - init_chain(7); + add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); }); } #[test] -fn should_verify_batch_proof() { +fn should_verify_batch_proofs() { + fn generate_and_verify_batch_proof( + ext: &mut sp_io::TestExternalities, + leaves: &Vec, + blocks_to_add: usize, + ) { + let (leaves, proof) = ext + .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); + + ext.execute_with(|| { + add_blocks(blocks_to_add); + // then + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + }) + } + let _ = env_logger::try_init(); - // Start off with chain initialisation and storing indexing data off-chain - // (MMR Leafs) - let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); - ext.persist_offchain_overlay(); + use itertools::Itertools; - // Try to generate proof now. This requires the offchain extensions to be present - // to retrieve full leaf data. 
+ let mut ext = new_test_ext(); + // require the offchain extensions to be present + // to retrieve full leaf data when generating proofs register_offchain_ext(&mut ext); - let (leaves, proof) = ext.execute_with(|| { - // when - crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() - }); - ext.execute_with(|| { - init_chain(7); - // then - assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); - }); + // verify that up to n=10, valid proofs are generated for all possible leaf combinations + for n in 0..10 { + ext.execute_with(|| new_block()); + ext.persist_offchain_overlay(); + + // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); + + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + ext.persist_offchain_overlay(); + }); + } + + // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations + for n in 10..15 { + // (MMR Leafs) + ext.execute_with(|| new_block()); + ext.persist_offchain_overlay(); + + // generate all possible 2-leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); + + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + ext.persist_offchain_overlay(); + }); + } + + generate_and_verify_batch_proof(&mut ext, &vec![7, 11], 20); + ext.execute_with(|| add_blocks(1000)); + ext.persist_offchain_overlay(); + generate_and_verify_batch_proof(&mut ext, &vec![7, 11, 100, 800], 100); } #[test] @@ -366,7 +414,7 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -393,7 +441,7 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. 
This requires the offchain extensions to be present @@ -424,7 +472,7 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); register_offchain_ext(&mut ext); @@ -438,3 +486,238 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); }); } + +#[test] +fn should_verify_pruning_map() { + use sp_core::offchain::StorageKind; + use sp_io::offchain; + + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + type TestPruningMap = PruningMap; + fn offchain_decoded(key: Vec) -> Option> { + offchain::local_storage_get(StorageKind::PERSISTENT, &key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + } + + // test append + { + TestPruningMap::append(1, H256::repeat_byte(1)); + + TestPruningMap::append(2, H256::repeat_byte(21)); + TestPruningMap::append(2, H256::repeat_byte(22)); + + TestPruningMap::append(3, H256::repeat_byte(31)); + TestPruningMap::append(3, H256::repeat_byte(32)); + TestPruningMap::append(3, H256::repeat_byte(33)); + + // `0` not present + let map_key = TestPruningMap::pruning_map_offchain_key(0); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + + // verify `1` entries + let map_key = TestPruningMap::pruning_map_offchain_key(1); + let expected = vec![H256::repeat_byte(1)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // verify `2` entries + let map_key = TestPruningMap::pruning_map_offchain_key(2); + let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // verify `3` entries + let map_key = TestPruningMap::pruning_map_offchain_key(3); + let expected = + vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // `4` not present + let map_key = TestPruningMap::pruning_map_offchain_key(4); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + } + + // test remove + { + // `0` doesn't return anything + assert_eq!(TestPruningMap::remove(0), None); + + // remove and verify `1` entries + let expected = vec![H256::repeat_byte(1)]; + assert_eq!(TestPruningMap::remove(1), Some(expected)); + + // remove and verify `2` entries + let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; + assert_eq!(TestPruningMap::remove(2), Some(expected)); + + // remove and verify `3` entries + let expected = + vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; + assert_eq!(TestPruningMap::remove(3), Some(expected)); + + // `4` doesn't return anything + assert_eq!(TestPruningMap::remove(4), None); + + // no entries left in offchain map + for block in 0..5 { + let map_key = TestPruningMap::pruning_map_offchain_key(block); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + } + } + }) +} + +#[test] +fn should_canonicalize_offchain() { + use frame_support::traits::Hooks; + + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + register_offchain_ext(&mut ext); + + // adding 13 blocks that we'll later check have been canonicalized, + // (test assumes `13 < frame_system::BlockHashCount`). 
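Before the test body, a compact sketch of the two offchain key layouts these assertions poke at. The tuple shapes mirror `node_offchain_key` and `node_canon_offchain_key` from the pallet above; the concrete `PREFIX` value is only an assumption standing in for whatever `INDEXING_PREFIX` the runtime configures:

use codec::Encode;

const PREFIX: &[u8] = b"mmr-"; // assumed; set via `INDEXING_PREFIX` in practice
type Hash = [u8; 32];

// Fork-aware key: bound to the parent hash of the block that added the node.
fn node_offchain_key(parent_hash: Hash, pos: u64) -> Vec<u8> {
    (PREFIX, parent_hash, pos).encode()
}

// Canonical key: position only; used once the node's block has left the
// `BlockHashCount` window and forks are no longer a concern.
fn node_canon_offchain_key(pos: u64) -> Vec<u8> {
    (PREFIX, pos).encode()
}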
+ let to_canon_count = 13u32; + + // add 13 blocks and verify leaves and nodes for them have been added to + // offchain MMR using fork-proof keys. + for blocknum in 0..to_canon_count { + ext.execute_with(|| { + new_block(); + as Hooks>::offchain_worker(blocknum.into()); + }); + ext.persist_offchain_overlay(); + } + let offchain_db = ext.offchain_db(); + ext.execute_with(|| { + // verify leaves added by blocks 1..=13 + for block_num in 1..=to_canon_count { + let parent_num: BlockNumber = (block_num - 1).into(); + let leaf_index = u64::from(block_num - 1); + let pos = helper::leaf_index_to_pos(leaf_index.into()); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); + let parent_hash = >::block_hash(parent_num); + // but available in fork-proof storage. + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))) + ); + } + + // verify a couple of nodes and peaks: + // `pos` is node to verify, + // `leaf_index` is leaf that added node `pos`, + // `expected` is expected value of node at `pos`. + let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { + let parent_num: BlockNumber = leaf_index.try_into().unwrap(); + let parent_hash = >::block_hash(parent_num); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); + // but available in fork-proof storage. + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Hash(expected)) + ); + }; + verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); + verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75")); + verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b")); + }); + + // add another `frame_system::BlockHashCount` blocks and verify all nodes and leaves + // added by our original `to_canon_count` blocks have now been canonicalized in offchain db. + let block_hash_size: u64 = ::BlockHashCount::get(); + let base = to_canon_count; + for blocknum in base..(base + u32::try_from(block_hash_size).unwrap()) { + ext.execute_with(|| { + new_block(); + as Hooks>::offchain_worker(blocknum.into()); + }); + ext.persist_offchain_overlay(); + } + ext.execute_with(|| { + // verify leaves added by blocks 1..=13, should be in offchain under canon key. + for block_num in 1..=to_canon_count { + let leaf_index = u64::from(block_num - 1); + let pos = helper::leaf_index_to_pos(leaf_index.into()); + let parent_num: BlockNumber = (block_num - 1).into(); + let parent_hash = >::block_hash(parent_num); + // no longer available in fork-proof storage (was pruned), + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); + // but available using canon key. + assert_eq!( + offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), + Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))) + ); + } + + // also check some nodes and peaks: + // `pos` is node to verify, + // `leaf_index` is leaf that added node `pos`, + // `expected` is expected value of node at `pos`. 
+		let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| {
+			let parent_num: BlockNumber = leaf_index.try_into().unwrap();
+			let parent_hash = <frame_system::Pallet<Test>>::block_hash(parent_num);
+			// no longer available in fork-proof storage (was pruned),
+			assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None);
+			// but available using canon key.
+			assert_eq!(
+				offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node),
+				Some(mmr::Node::Hash(expected))
+			);
+		};
+		verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"));
+		verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75"));
+		verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b"));
+	});
+}
+
+#[test]
+fn should_verify_canonicalized() {
+	use frame_support::traits::Hooks;
+	let _ = env_logger::try_init();
+
+	// How deep is our fork-aware storage (in terms of blocks/leaves; there will be more nodes).
+	let block_hash_size: u64 = <Test as frame_system::Config>::BlockHashCount::get();
+
+	// Start off with chain initialisation and storing indexing data off-chain.
+	// Create twice as many leaf entries as our fork-aware capacity,
+	// resulting in ~half of MMR storage using canonical keys and the other half fork-aware keys.
+	// Verify that proofs can be generated (using leaves and nodes from the full set) and verified.
+	let mut ext = new_test_ext();
+	register_offchain_ext(&mut ext);
+	for blocknum in 0u32..(2 * block_hash_size).try_into().unwrap() {
+		ext.execute_with(|| {
+			new_block();
+			<Pallet<Test> as Hooks<BlockNumber>>::offchain_worker(blocknum.into());
+		});
+		ext.persist_offchain_overlay();
+	}
+
+	// Generate proofs for some blocks.
+	let (leaves, proofs) =
+		ext.execute_with(|| crate::Pallet::<Test>::generate_batch_proof(vec![0, 4, 5, 7]).unwrap());
+	// Verify all previously generated proofs.
+	ext.execute_with(|| {
+		assert_eq!(crate::Pallet::<Test>::verify_leaves(leaves, proofs), Ok(()));
+	});
+
+	// Generate proofs for some new blocks.
+	let (leaves, proofs) = ext.execute_with(|| {
+		crate::Pallet::<Test>::generate_batch_proof(vec![block_hash_size + 7]).unwrap()
+	});
+	// Add some more blocks then verify all previously generated proofs.
+	ext.execute_with(|| {
+		add_blocks(7);
+		assert_eq!(crate::Pallet::<Test>::verify_leaves(leaves, proofs), Ok(()));
+	});
+}
diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs
index 8201426f5330f..dafc421a7b72d 100644
--- a/frame/multisig/src/benchmarking.rs
+++ b/frame/multisig/src/benchmarking.rs
@@ -80,7 +80,7 @@ benchmarks! {
 		// Whitelist caller account from further DB operations.
 		let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
 		frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
-	}: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, 0)
+	}: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, Weight::zero())
 	verify {
 		assert!(Multisigs::<T>::contains_key(multi_account_id, call_hash));
 		assert!(!Calls::<T>::contains_key(call_hash));
@@ -99,7 +99,7 @@ benchmarks! {
 		// Whitelist caller account from further DB operations.
let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, 0) + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, Weight::zero()) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); assert!(Calls::::contains_key(call_hash)); @@ -118,13 +118,13 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi, storing for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; assert!(Calls::::contains_key(call_hash)); let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, 0) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -143,13 +143,13 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi, not storing - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, Weight::zero())?; assert!(!Calls::::contains_key(call_hash)); let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, 0) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -169,20 +169,20 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi, storing it for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::max_value()) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::MAX) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); } @@ -200,7 +200,7 @@ benchmarks! { let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, 0) + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, Weight::zero()) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); } @@ -225,13 +225,13 @@ benchmarks! { None, call, false, - 0 + Weight::zero() )?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, 0) + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -251,13 +251,13 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); @@ -270,7 +270,7 @@ benchmarks! { signatories2, Some(timepoint), call_hash, - Weight::max_value() + Weight::MAX ) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); @@ -288,7 +288,7 @@ benchmarks! { let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, 0)?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, Weight::zero())?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); assert!(Calls::::contains_key(call_hash)); // Whitelist caller account from further DB operations. 
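One more flavor of the same `Weight` migration runs through the multisig changes above and below: call sites that used to pass bare integers for `max_weight` now spell the intent out with the struct's constructors. A hedged sketch of the two idioms:

use frame_support::weights::Weight;

fn main() {
    let _no_weight = Weight::zero(); // replaces the old bare `0`
    let _unlimited = Weight::MAX;    // replaces the old `Weight::max_value()`
}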
diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs
index d4ea041e5820e..10184ce84a9a8 100644
--- a/frame/multisig/src/lib.rs
+++ b/frame/multisig/src/lib.rs
@@ -257,9 +257,9 @@ pub mod pallet {
 			let dispatch_info = call.get_dispatch_info();
 			(
 				T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32))
-					.saturating_add(dispatch_info.weight)
 					// AccountData for inner call origin accountdata.
-					.saturating_add(T::DbWeight::get().reads_writes(1, 1)),
+					.saturating_add(T::DbWeight::get().reads_writes(1, 1))
+					.saturating_add(dispatch_info.weight),
 				dispatch_info.class,
 			)
 		})]
diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs
index d67d06e1bce05..ddf5197ab734c 100644
--- a/frame/multisig/src/tests.rs
+++ b/frame/multisig/src/tests.rs
@@ -50,7 +50,7 @@ frame_support::construct_runtime!(
 
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1024);
+		frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = TestBaseCallFilter;
@@ -152,7 +152,7 @@ fn multisig_deposit_is_taken_and_returned() {
 			None,
 			OpaqueCall::from_encoded(data.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 2);
 		assert_eq!(Balances::reserved_balance(1), 3);
@@ -190,7 +190,7 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() {
 			None,
 			OpaqueCall::from_encoded(data),
 			true,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 0);
 		assert_eq!(Balances::reserved_balance(1), 5);
@@ -226,8 +226,8 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 1);
 		assert_eq!(Balances::reserved_balance(1), 4);
@@ -239,7 +239,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() {
 			Some(now()),
 			OpaqueCall::from_encoded(data),
 			true,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(2), 3);
 		assert_eq!(Balances::reserved_balance(2), 2);
@@ -271,26 +271,20 @@ fn cancel_multisig_returns_deposit() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_ok!(Multisig::approve_as_multi(
 			Origin::signed(2),
 			3,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 6);
 		assert_eq!(Balances::reserved_balance(1), 4);
-		assert_ok!(Multisig::cancel_as_multi(
-			Origin::signed(1),
-			3,
-			vec![2, 3],
-			now(),
-			hash.clone()
-		),);
+		assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash));
 		assert_eq!(Balances::free_balance(1), 10);
 		assert_eq!(Balances::reserved_balance(1), 0);
 	});
@@ -313,13 +307,20 @@ fn timepoint_checking_works() {
 				2,
 				vec![1, 3],
 				Some(now()),
-				hash.clone(),
-				0
+				hash,
+				Weight::zero()
 			),
 			Error::<Test>::UnexpectedTimepoint,
 		);
 
-		assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0));
+		assert_ok!(Multisig::approve_as_multi(
+			Origin::signed(1),
+			2,
+			vec![2, 3],
+			None,
+			hash,
+			Weight::zero()
+		));
 
 		assert_noop!(
 			Multisig::as_multi(
@@ -329,7 +330,7 @@ fn timepoint_checking_works() {
 				None,
 				OpaqueCall::from_encoded(call.clone()),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::NoTimepoint,
 		);
@@ -342,7 +343,7 @@ fn timepoint_checking_works() {
 				Some(later),
 				OpaqueCall::from_encoded(call),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::WrongTimepoint,
 		);
@@ -368,7 +369,7 @@ fn multisig_2_of_3_works_with_call_storing() {
 			None,
 			OpaqueCall::from_encoded(data),
 			true,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(6), 0);
 
@@ -396,7 +397,14 @@ fn multisig_2_of_3_works() {
 		let call_weight = call.get_dispatch_info().weight;
 		let data = call.encode();
 		let hash = blake2_256(&data);
-		assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0));
+		assert_ok!(Multisig::approve_as_multi(
+			Origin::signed(1),
+			2,
+			vec![2, 3],
+			None,
+			hash,
+			Weight::zero()
+		));
 		assert_eq!(Balances::free_balance(6), 0);
 
 		assert_ok!(Multisig::as_multi(
@@ -429,16 +437,16 @@ fn multisig_3_of_3_works() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_ok!(Multisig::approve_as_multi(
 			Origin::signed(2),
 			3,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(6), 0);
 
@@ -465,28 +473,22 @@ fn cancel_multisig_works() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_ok!(Multisig::approve_as_multi(
 			Origin::signed(2),
 			3,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_noop!(
-			Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()),
+			Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash),
 			Error::<Test>::NotOwner,
 		);
-		assert_ok!(Multisig::cancel_as_multi(
-			Origin::signed(1),
-			3,
-			vec![2, 3],
-			now(),
-			hash.clone()
-		),);
+		assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),);
 	});
 }
 
@@ -502,7 +504,7 @@ fn cancel_multisig_with_call_storage_works() {
 			None,
 			OpaqueCall::from_encoded(call),
 			true,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 4);
 		assert_ok!(Multisig::approve_as_multi(
@@ -510,20 +512,14 @@ fn cancel_multisig_with_call_storage_works() {
 			3,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_noop!(
-			Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()),
+			Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash),
 			Error::<Test>::NotOwner,
 		);
-		assert_ok!(Multisig::cancel_as_multi(
-			Origin::signed(1),
-			3,
-			vec![2, 3],
-			now(),
-			hash.clone()
-		),);
+		assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),);
 		assert_eq!(Balances::free_balance(1), 10);
 	});
 }
@@ -538,8 +534,8 @@ fn cancel_multisig_with_alt_call_storage_works() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(1), 6);
 		assert_ok!(Multisig::as_multi(
@@ -549,7 +545,7 @@ fn cancel_multisig_with_alt_call_storage_works() {
 			Some(now()),
 			OpaqueCall::from_encoded(call),
 			true,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(2), 8);
 		assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash));
@@ -576,7 +572,7 @@ fn multisig_2_of_3_as_multi_works() {
 			None,
 			OpaqueCall::from_encoded(data.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(6), 0);
 
@@ -615,7 +611,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() {
 			None,
 			OpaqueCall::from_encoded(data1.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_ok!(Multisig::as_multi(
 			Origin::signed(2),
@@ -624,7 +620,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() {
 			None,
 			OpaqueCall::from_encoded(data2.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_ok!(Multisig::as_multi(
 			Origin::signed(3),
@@ -669,7 +665,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() {
 			None,
 			OpaqueCall::from_encoded(data.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_ok!(Multisig::as_multi(
 			Origin::signed(2),
@@ -689,7 +685,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() {
 			None,
 			OpaqueCall::from_encoded(data.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_ok!(Multisig::as_multi(
 			Origin::signed(3),
@@ -727,7 +723,7 @@ fn minimum_threshold_check_works() {
 				None,
 				OpaqueCall::from_encoded(call.clone()),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::MinimumThreshold,
 		);
@@ -739,7 +735,7 @@ fn minimum_threshold_check_works() {
 				None,
 				OpaqueCall::from_encoded(call.clone()),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::MinimumThreshold,
 		);
@@ -758,7 +754,7 @@ fn too_many_signatories_fails() {
 				None,
 				OpaqueCall::from_encoded(call),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::TooManySignatories,
 		);
@@ -775,8 +771,8 @@ fn duplicate_approvals_are_ignored() {
 			2,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_noop!(
 			Multisig::approve_as_multi(
@@ -784,8 +780,8 @@ fn duplicate_approvals_are_ignored() {
 				2,
 				vec![2, 3],
 				Some(now()),
-				hash.clone(),
-				0
+				hash,
+				Weight::zero()
 			),
 			Error::<Test>::AlreadyApproved,
 		);
@@ -794,8 +790,8 @@ fn duplicate_approvals_are_ignored() {
 			2,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_noop!(
 			Multisig::approve_as_multi(
@@ -803,8 +799,8 @@ fn duplicate_approvals_are_ignored() {
 				2,
 				vec![1, 2],
 				Some(now()),
-				hash.clone(),
-				0
+				hash,
+				Weight::zero()
 			),
 			Error::<Test>::AlreadyApproved,
 		);
@@ -822,7 +818,14 @@ fn multisig_1_of_3_works() {
 		let call = call_transfer(6, 15).encode();
 		let hash = blake2_256(&call);
 		assert_noop!(
-			Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone(), 0),
+			Multisig::approve_as_multi(
+				Origin::signed(1),
+				1,
+				vec![2, 3],
+				None,
+				hash,
+				Weight::zero()
+			),
 			Error::<Test>::MinimumThreshold,
 		);
 		assert_noop!(
@@ -833,7 +836,7 @@ fn multisig_1_of_3_works() {
 				None,
 				OpaqueCall::from_encoded(call),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::MinimumThreshold,
 		);
@@ -872,7 +875,7 @@ fn weight_check_works() {
 			None,
 			OpaqueCall::from_encoded(data.clone()),
 			false,
-			0
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(6), 0);
 
@@ -884,7 +887,7 @@ fn weight_check_works() {
 				Some(now()),
 				OpaqueCall::from_encoded(data),
 				false,
-				0
+				Weight::zero()
 			),
 			Error::<Test>::MaxWeightTooLow,
 		);
@@ -911,24 +914,24 @@ fn multisig_handles_no_preimage_after_all_approve() {
 			3,
 			vec![2, 3],
 			None,
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_ok!(Multisig::approve_as_multi(
 			Origin::signed(2),
 			3,
 			vec![1, 3],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_ok!(Multisig::approve_as_multi(
 			Origin::signed(3),
 			3,
 			vec![1, 2],
 			Some(now()),
-			hash.clone(),
-			0
+			hash,
+			Weight::zero()
 		));
 		assert_eq!(Balances::free_balance(6), 0);
 
diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs
index 7946b96546768..0b580ec82b640 100644
--- a/frame/multisig/src/weights.rs
+++ b/frame/multisig/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_multisig.
@@ -60,199 +60,199 @@ pub trait WeightInfo {
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	fn as_multi_threshold_1(_z: u32, ) -> Weight {
-		(17_537_000 as Weight)
+		Weight::from_ref_time(17_537_000 as RefTimeWeight)
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
-		(36_535_000 as Weight)
+		Weight::from_ref_time(36_535_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((99_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn as_multi_create_store(s: u32, z: u32, ) -> Weight {
-		(39_918_000 as Weight)
+		Weight::from_ref_time(39_918_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((95_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
-		(25_524_000 as Weight)
+		Weight::from_ref_time(25_524_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((94_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	fn as_multi_approve_store(s: u32, z: u32, ) -> Weight {
-		(39_923_000 as Weight)
+		Weight::from_ref_time(39_923_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((91_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
-		(45_877_000 as Weight)
+		Weight::from_ref_time(45_877_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((146_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn approve_as_multi_create(s: u32, ) -> Weight {
-		(34_309_000 as Weight)
+		Weight::from_ref_time(34_309_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:0)
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
-		(22_848_000 as Weight)
+		Weight::from_ref_time(22_848_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
 	fn approve_as_multi_complete(s: u32, ) -> Weight {
-		(63_239_000 as Weight)
+		Weight::from_ref_time(63_239_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((161_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	fn cancel_as_multi(s: u32, ) -> Weight {
-		(51_254_000 as Weight)
+		Weight::from_ref_time(51_254_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((118_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 }
 
 // For backwards compatibility and tests
 impl WeightInfo for () {
 	fn as_multi_threshold_1(_z: u32, ) -> Weight {
-		(17_537_000 as Weight)
+		Weight::from_ref_time(17_537_000 as RefTimeWeight)
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
-		(36_535_000 as Weight)
+		Weight::from_ref_time(36_535_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((99_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn as_multi_create_store(s: u32, z: u32, ) -> Weight {
-		(39_918_000 as Weight)
+		Weight::from_ref_time(39_918_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((95_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
-		(25_524_000 as Weight)
+		Weight::from_ref_time(25_524_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((94_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	fn as_multi_approve_store(s: u32, z: u32, ) -> Weight {
-		(39_923_000 as Weight)
+		Weight::from_ref_time(39_923_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((91_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
-		(45_877_000 as Weight)
+		Weight::from_ref_time(45_877_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((146_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
 			// Standard Error: 0
-			.saturating_add((2_000 as Weight).saturating_mul(z as Weight))
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn approve_as_multi_create(s: u32, ) -> Weight {
-		(34_309_000 as Weight)
+		Weight::from_ref_time(34_309_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:0)
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
-		(22_848_000 as Weight)
+		Weight::from_ref_time(22_848_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+			.saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
 	fn approve_as_multi_complete(s: u32, ) -> Weight {
-		(63_239_000 as Weight)
+		Weight::from_ref_time(63_239_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((161_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Multisig Multisigs (r:1 w:1)
 	// Storage: Multisig Calls (r:1 w:1)
 	fn cancel_as_multi(s: u32, ) -> Weight {
-		(51_254_000 as Weight)
+		Weight::from_ref_time(51_254_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((118_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 }
diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs
index 4272b7ab7fd2b..ffe39802e0786 100644
--- a/frame/nicks/src/lib.rs
+++ b/frame/nicks/src/lib.rs
@@ -47,6 +47,7 @@ type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
 type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountIdOf<T>>>::Balance;
 type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<AccountIdOf<T>>>::NegativeImbalance;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
 #[frame_support::pallet]
 pub mod pallet {
@@ -193,10 +194,7 @@ pub mod pallet {
 		/// - One event.
 		/// # </weight>
 		#[pallet::weight(70_000_000)]
-		pub fn kill_name(
-			origin: OriginFor<T>,
-			target: <T::Lookup as StaticLookup>::Source,
-		) -> DispatchResult {
+		pub fn kill_name(origin: OriginFor<T>, target: AccountIdLookupOf<T>) -> DispatchResult {
 			T::ForceOrigin::ensure_origin(origin)?;
 
 			// Figure out who we're meant to be clearing.
@@ -225,7 +223,7 @@ pub mod pallet {
 		#[pallet::weight(70_000_000)]
 		pub fn force_name(
 			origin: OriginFor<T>,
-			target: <T::Lookup as StaticLookup>::Source,
+			target: AccountIdLookupOf<T>,
 			name: Vec<u8>,
 		) -> DispatchResult {
 			T::ForceOrigin::ensure_origin(origin)?;
@@ -275,7 +273,7 @@ mod tests {
 
 	parameter_types! {
 		pub BlockWeights: frame_system::limits::BlockWeights =
-			frame_system::limits::BlockWeights::simple_max(1024);
+			frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 	}
 	impl frame_system::Config for Test {
 		type BaseCallFilter = frame_support::traits::Everything;
diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs
index 07f2e9de37dde..427e71af6e8c4 100644
--- a/frame/node-authorization/src/lib.rs
+++ b/frame/node-authorization/src/lib.rs
@@ -46,9 +46,12 @@ pub mod weights;
 
 pub use pallet::*;
 use sp_core::OpaquePeerId as PeerId;
+use sp_runtime::traits::StaticLookup;
 use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*};
 pub use weights::WeightInfo;
 
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -211,9 +214,10 @@ pub mod pallet {
 		pub fn add_well_known_node(
 			origin: OriginFor<T>,
 			node: PeerId,
-			owner: T::AccountId,
+			owner: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			T::AddOrigin::ensure_origin(origin)?;
+			let owner = T::Lookup::lookup(owner)?;
 			ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong);
 
 			let mut nodes = WellKnownNodes::<T>::get();
@@ -355,9 +359,10 @@ pub mod pallet {
 		pub fn transfer_node(
 			origin: OriginFor<T>,
 			node: PeerId,
-			owner: T::AccountId,
+			owner: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let sender = ensure_signed(origin)?;
+			let owner = T::Lookup::lookup(owner)?;
 			ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong);
 
 			let pre_owner = Owners::<T>::get(&node).ok_or(Error::<T>::NotClaimed)?;
diff --git a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs
index cf182f94273ce..f5a816ddaecee 100644
--- a/frame/node-authorization/src/weights.rs
+++ b/frame/node-authorization/src/weights.rs
@@ -21,7 +21,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 pub trait WeightInfo {
@@ -37,13 +37,13 @@ pub trait WeightInfo {
 }
 
 impl WeightInfo for () {
-	fn add_well_known_node() -> Weight { 50_000_000 }
-	fn remove_well_known_node() -> Weight { 50_000_000 }
-	fn swap_well_known_node() -> Weight { 50_000_000 }
-	fn reset_well_known_nodes() -> Weight { 50_000_000 }
-	fn claim_node() -> Weight { 50_000_000 }
-	fn remove_claim() -> Weight { 50_000_000 }
-	fn transfer_node() -> Weight { 50_000_000 }
-	fn add_connections() -> Weight { 50_000_000 }
-	fn remove_connections() -> Weight { 50_000_000 }
+	fn add_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn remove_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn swap_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn reset_well_known_nodes() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn claim_node() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn remove_claim() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn transfer_node() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn add_connections() -> Weight { Weight::from_ref_time(50_000_000) }
+	fn remove_connections() -> Weight { Weight::from_ref_time(50_000_000) }
 }
diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs
index 4c2c902846a85..f4ecb4a9b0ff1 100644
--- a/frame/nomination-pools/benchmarking/src/lib.rs
+++ b/frame/nomination-pools/benchmarking/src/lib.rs
@@ -31,7 +31,7 @@ use pallet_nomination_pools::{
 	MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools,
 	PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage,
 };
-use sp_runtime::traits::{Bounded, Zero};
+use sp_runtime::traits::{Bounded, StaticLookup, Zero};
 use sp_staking::{EraIndex, StakingInterface};
 // `frame_benchmarking::benchmarks!` macro needs this
 use pallet_nomination_pools::Call;
@@ -73,13 +73,14 @@ fn create_pool_account<T: pallet_nomination_pools::Config>(
 	let ed = CurrencyOf::<T>::minimum_balance();
 	let pool_creator: T::AccountId =
 		create_funded_user_with_balance::<T>("pool_creator", n, ed + balance * 2u32.into());
+	let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone());
 
 	Pools::<T>::create(
 		Origin::Signed(pool_creator.clone()).into(),
 		balance,
-		pool_creator.clone(),
-		pool_creator.clone(),
-		pool_creator.clone(),
+		pool_creator_lookup.clone(),
+		pool_creator_lookup.clone(),
+		pool_creator_lookup,
 	)
 	.unwrap();
 
@@ -224,7 +225,7 @@ frame_benchmarking::benchmarks! {
 			origin_weight
 		);
 
-		let max_additional = scenario.dest_weight.clone() - origin_weight;
+		let max_additional = scenario.dest_weight - origin_weight;
 		let joiner_free = CurrencyOf::<T>::minimum_balance() + max_additional;
 
 		let joiner: T::AccountId
@@ -243,7 +244,7 @@ frame_benchmarking::benchmarks! {
 	bond_extra_transfer {
 		let origin_weight = min_create_bond::<T>() * 2u32.into();
 		let scenario = ListScenario::<T>::new(origin_weight, true)?;
-		let extra = scenario.dest_weight.clone() - origin_weight;
+		let extra = scenario.dest_weight - origin_weight;
 
 		// creator of the src pool will bond-extra, bumping itself to dest bag.
 
@@ -258,7 +259,7 @@ frame_benchmarking::benchmarks! {
 	bond_extra_reward {
 		let origin_weight = min_create_bond::<T>() * 2u32.into();
 		let scenario = ListScenario::<T>::new(origin_weight, true)?;
-		let extra = (scenario.dest_weight.clone() - origin_weight).max(CurrencyOf::<T>::minimum_balance());
+		let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::<T>::minimum_balance());
 
 		// transfer exactly `extra` to the depositor of the src pool (1),
 		let reward_account1 = Pools::<T>::create_reward_account(1);
@@ -306,17 +307,18 @@ frame_benchmarking::benchmarks! {
 		// significantly higher than the first position in a list (e.g. the first bag threshold).
 		let origin_weight = min_create_bond::<T>() * 200u32.into();
 		let scenario = ListScenario::<T>::new(origin_weight, false)?;
-		let amount = origin_weight - scenario.dest_weight.clone();
+		let amount = origin_weight - scenario.dest_weight;
 		let scenario = scenario.add_joiner(amount);
 		let member_id = scenario.origin1_member.unwrap().clone();
+		let member_id_lookup = T::Lookup::unlookup(member_id.clone());
 		let all_points = PoolMembers::<T>::get(&member_id).unwrap().points;
 		whitelist_account!(member_id);
-	}: _(Origin::Signed(member_id.clone()), member_id.clone(), all_points)
+	}: _(Origin::Signed(member_id.clone()), member_id_lookup, all_points)
 	verify {
 		let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap();
 		// We at least went down to the destination bag
-		assert!(bonded_after <= scenario.dest_weight.clone());
+		assert!(bonded_after <= scenario.dest_weight);
 		let member = PoolMembers::<T>::get(
 			&member_id
 		)
@@ -382,6 +384,7 @@ frame_benchmarking::benchmarks! {
 		// Add a new member
 		let min_join_bond = MinJoinBond::<T>::get().max(CurrencyOf::<T>::minimum_balance());
 		let joiner = create_funded_user_with_balance::<T>("joiner", 0, min_join_bond * 2u32.into());
+		let joiner_lookup = T::Lookup::unlookup(joiner.clone());
 		Pools::<T>::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1)
 			.unwrap();
 
@@ -408,7 +411,7 @@ frame_benchmarking::benchmarks! {
 		pallet_staking::benchmarking::add_slashing_spans::<T>(&pool_account, s);
 		whitelist_account!(joiner);
-	}: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner.clone(), s)
+	}: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner_lookup, s)
 	verify {
 		assert_eq!(
 			CurrencyOf::<T>::free_balance(&joiner),
@@ -423,6 +426,7 @@ frame_benchmarking::benchmarks! {
 		let min_create_bond = min_create_bond::<T>();
 		let (depositor, pool_account) = create_pool_account::<T>(0, min_create_bond);
+		let depositor_lookup = T::Lookup::unlookup(depositor.clone());
 
 		// We set the pool to the destroying state so the depositor can leave
 		BondedPools::<T>::try_mutate(&1, |maybe_bonded_pool| {
@@ -465,7 +469,7 @@ frame_benchmarking::benchmarks! {
 		assert!(frame_system::Account::<T>::contains_key(&reward_account));
 		whitelist_account!(depositor);
-	}: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor.clone(), s)
+	}: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor_lookup, s)
 	verify {
 		// Pool removal worked
 		assert!(!pallet_staking::Ledger::<T>::contains_key(&pool_account));
@@ -487,6 +491,7 @@ frame_benchmarking::benchmarks! {
 	create {
 		let min_create_bond = min_create_bond::<T>();
 		let depositor: T::AccountId = account("depositor", USER_SEED, 0);
+		let depositor_lookup = T::Lookup::unlookup(depositor.clone());
 
 		// Give the depositor some balance to bond
 		CurrencyOf::<T>::make_free_balance_be(&depositor, min_create_bond * 2u32.into());
@@ -499,9 +504,9 @@ frame_benchmarking::benchmarks! {
 	}: _(
 		Origin::Signed(depositor.clone()),
 		min_create_bond,
-		depositor.clone(),
-		depositor.clone(),
-		depositor.clone()
+		depositor_lookup.clone(),
+		depositor_lookup.clone(),
+		depositor_lookup
 	)
 	verify {
 		assert_eq!(RewardPools::<T>::count(), 1);
diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs
index eb884869f6d32..d239d4f072b80 100644
--- a/frame/nomination-pools/benchmarking/src/mock.rs
+++ b/frame/nomination-pools/benchmarking/src/mock.rs
@@ -17,7 +17,10 @@
 
 use frame_election_provider_support::VoteWeight;
 use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId};
-use sp_runtime::traits::{Convert, IdentityLookup};
+use sp_runtime::{
+	traits::{Convert, IdentityLookup},
+	FixedU128,
+};
 
 type AccountId = u128;
 type AccountIndex = u32;
@@ -144,13 +147,15 @@ impl Convert<U256, Balance> for U256ToBalance {
 parameter_types! {
 	pub static PostUnbondingPoolsWindow: u32 = 10;
 	pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls");
-	pub const MinPointsToBalance: u32 = 10;
+	pub const MaxPointsToBalance: u8 = 10;
 }
 
 impl pallet_nomination_pools::Config for Runtime {
 	type Event = Event;
 	type WeightInfo = ();
 	type Currency = Balances;
+	type CurrencyBalance = Balance;
+	type RewardCounter = FixedU128;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
 	type StakingInterface = Staking;
@@ -158,19 +163,11 @@ impl pallet_nomination_pools::Config for Runtime {
 	type MaxMetadataLen = ConstU32<256>;
 	type MaxUnbonding = ConstU32<8>;
 	type PalletId = PoolsPalletId;
-	type MinPointsToBalance = MinPointsToBalance;
+	type MaxPointsToBalance = MaxPointsToBalance;
 }
 
 impl crate::Config for Runtime {}
 
-impl<LocalCall> frame_system::offchain::SendTransactionTypes<LocalCall> for Runtime
-where
-	Call: From<LocalCall>,
-{
-	type OverarchingCall = Call;
-	type Extrinsic = UncheckedExtrinsic;
-}
-
 type Block = frame_system::mocking::MockBlock<Runtime>;
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
 frame_support::construct_runtime!(
diff --git a/frame/nomination-pools/runtime-api/Cargo.toml b/frame/nomination-pools/runtime-api/Cargo.toml
new file mode 100644
index 0000000000000..dde925c62fe0d
--- /dev/null
+++ b/frame/nomination-pools/runtime-api/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "pallet-nomination-pools-runtime-api"
+version = "1.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+license = "Apache-2.0"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+description = "Runtime API for nomination-pools FRAME pallet"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"sp-api/std",
+	"sp-std/std",
+]
diff --git a/frame/nomination-pools/runtime-api/README.md b/frame/nomination-pools/runtime-api/README.md
new file mode 100644
index 0000000000000..af90b31733b0b
--- /dev/null
+++ b/frame/nomination-pools/runtime-api/README.md
@@ -0,0 +1,3 @@
+Runtime API definition for nomination-pools pallet.
+
+License: Apache-2.0
\ No newline at end of file
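The nomination-pools benchmarks above now wrap plain account ids with `T::Lookup::unlookup` before passing them to dispatchables, matching the `AccountIdLookupOf<T>` aliases introduced for `pallet-nicks` and `pallet-node-authorization`. A minimal sketch of the round trip under a generic `T: frame_system::Config`; the helper name is illustrative, not part of the diff:

```rust
use sp_runtime::traits::StaticLookup;

/// The source type dispatchables now accept instead of a bare `T::AccountId`.
type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

/// Illustrative helper showing both directions of the conversion.
fn lookup_round_trip<T: frame_system::Config>(
	who: T::AccountId,
) -> Result<T::AccountId, sp_runtime::DispatchError> {
	// Callers (tests, benchmarks, clients) wrap an account id into the
	// runtime's lookup source type...
	let source: AccountIdLookupOf<T> = T::Lookup::unlookup(who);
	// ...and the dispatchable resolves it back on entry, as
	// `add_well_known_node` and `transfer_node` do above.
	let resolved = T::Lookup::lookup(source)?;
	Ok(resolved)
}
```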
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/nomination-pools/runtime-api/src/lib.rs b/frame/nomination-pools/runtime-api/src/lib.rs new file mode 100644 index 0000000000000..aa3ca57ca5b8b --- /dev/null +++ b/frame/nomination-pools/runtime-api/src/lib.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime API definition for nomination-pools pallet. +//! Currently supports only one rpc endpoint. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Codec; + +sp_api::decl_runtime_apis! { + /// Runtime api for accessing information about nomination pools. + pub trait NominationPoolsApi + where AccountId: Codec, Balance: Codec + { + /// Returns the pending rewards for the member that the AccountId was given for. + fn pending_rewards(member: AccountId) -> Balance; + } +} diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 19155a51405b0..40db4ac64851a 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -17,8 +17,8 @@ //! # Nomination Pools for Staking Delegation //! -//! A pallet that allows members to delegate their stake to nominating pools. A nomination pool -//! acts as nominator and nominates validators on the members behalf. +//! A pallet that allows members to delegate their stake to nominating pools. A nomination pool acts +//! as nominator and nominates validators on the members behalf. //! //! # Index //! @@ -28,22 +28,33 @@ //! //! ## Key terms //! +//! * pool id: A unique identifier of each pool. Set to u12 //! * bonded pool: Tracks the distribution of actively staked funds. See [`BondedPool`] and -//! [`BondedPoolInner`]. Bonded pools are identified via the pools bonded account. +//! [`BondedPoolInner`]. //! * reward pool: Tracks rewards earned by actively staked funds. See [`RewardPool`] and -//! [`RewardPools`]. Reward pools are identified via the pools bonded account. +//! [`RewardPools`]. //! * unbonding sub pools: Collection of pools at different phases of the unbonding lifecycle. See -//! [`SubPools`] and [`SubPoolsStorage`]. Sub pools are identified via the pools bonded account. -//! * members: Accounts that are members of pools. See [`PoolMember`] and [`PoolMembers`]. Pool -//! members are identified via their account. -//! * point: A unit of measure for a members portion of a pool's funds. +//! [`SubPools`] and [`SubPoolsStorage`]. +//! * members: Accounts that are members of pools. See [`PoolMember`] and [`PoolMembers`]. +//! * roles: Administrative roles of each pool, capable of controlling nomination, and the state of +//! the pool. +//! * point: A unit of measure for a members portion of a pool's funds. Points initially have a +//! ratio of 1 (as set by `POINTS_TO_BALANCE_INIT_RATIO`) to balance, but as slashing happens, +//! this can change. //! 
* kick: The act of a pool administrator forcibly ejecting a member. +//! * bonded account: A key-less account id derived from the pool id that acts as the bonded +//! account. This account registers itself as a nominator in the staking system, and follows +//! exactly the same rules and conditions as a normal staker. Its bond increases or decreases as +//! members join, it can `nominate` or `chill`, and might not even earn staking rewards if it is +//! not nominating proper validators. +//! * reward account: A similar key-less account, that is set as the `Payee` account fo the bonded +//! account for all staking rewards. //! //! ## Usage //! //! ### Join //! -//! A account can stake funds with a nomination pool by calling [`Call::join`]. +//! An account can stake funds with a nomination pool by calling [`Call::join`]. //! //! ### Claim rewards //! @@ -55,11 +66,13 @@ //! //! In order to leave, a member must take two steps. //! -//! First, they must call [`Call::unbond`]. The unbond other extrinsic will start the -//! unbonding process by unbonding all of the members funds. +//! First, they must call [`Call::unbond`]. The unbond extrinsic will start the unbonding process by +//! unbonding all or a portion of the members funds. //! -//! Second, once [`sp_staking::StakingInterface::bonding_duration`] eras have passed, the member -//! can call [`Call::withdraw_unbonded`] to withdraw all their funds. +//! > A member can have up to [`Config::MaxUnbonding`] distinct active unbonding requests. +//! +//! Second, once [`sp_staking::StakingInterface::bonding_duration`] eras have passed, the member can +//! call [`Call::withdraw_unbonded`] to withdraw any funds that are free. //! //! For design docs see the [bonded pool](#bonded-pool) and [unbonding sub //! pools](#unbonding-sub-pools) sections. @@ -67,9 +80,13 @@ //! ### Slashes //! //! Slashes are distributed evenly across the bonded pool and the unbonding pools from slash era+1 -//! through the slash apply era. Thus, any member who either a) unbonded or b) was actively -//! bonded in the aforementioned range of eras will be affected by the slash. A member is slashed -//! pro-rata based on its stake relative to the total slash amount. +//! through the slash apply era. Thus, any member who either +//! +//! 1. unbonded, or +//! 2. was actively bonded +// +//! in the aforementioned range of eras will be affected by the slash. A member is slashed pro-rata +//! based on its stake relative to the total slash amount. //! //! For design docs see the [slashing](#slashing) section. //! @@ -82,20 +99,33 @@ //! To help facilitate pool administration the pool has one of three states (see [`PoolState`]): //! //! * Open: Anyone can join the pool and no members can be permissionlessly removed. -//! * Blocked: No members can join and some admin roles can kick members. +//! * Blocked: No members can join and some admin roles can kick members. Kicking is not instant, +//! and follows the same process of `unbond` and then `withdraw_unbonded`. In other words, +//! administrators can permissionlessly unbond other members. //! * Destroying: No members can join and all members can be permissionlessly removed with //! [`Call::unbond`] and [`Call::withdraw_unbonded`]. Once a pool is in destroying state, it //! cannot be reverted to another state. //! -//! A pool has 3 administrative roles (see [`PoolRoles`]): +//! A pool has 4 administrative roles (see [`PoolRoles`]): //! //! * Depositor: creates the pool and is the initial member. They can only leave the pool once all -//! 
other members have left. Once they fully leave the pool is destroyed. +//! other members have left. Once they fully withdraw their funds, the pool is destroyed. //! * Nominator: can select which validators the pool nominates. //! * State-Toggler: can change the pools state and kick members if the pool is blocked. //! * Root: can change the nominator, state-toggler, or itself and can perform any of the actions //! the nominator or state-toggler can. //! +//! ### Dismantling +//! +//! As noted, a pool is destroyed once +//! +//! 1. First, all members need to fully unbond and withdraw. If the pool state is set to +//! `Destroying`, this can happen permissionlessly. +//! 2. The depositor itself fully unbonds and withdraws. Note that at this point, based on the +//! requirements of the staking system, the pool's bonded account's stake might not be able to ge +//! below a certain threshold as a nominator. At this point, the pool should `chill` itself to +//! allow the depositor to leave. +//! //! ## Design //! //! _Notes_: this section uses pseudo code to explain general design and does not necessarily @@ -108,8 +138,8 @@ //! members that where in the pool while it was backing a validator that got slashed. //! * Maximize scalability in terms of member count. //! -//! In order to maintain scalability, all operations are independent of the number of members. To -//! do this, delegation specific information is stored local to the member while the pool data +//! In order to maintain scalability, all operations are independent of the number of members. To do +//! this, delegation specific information is stored local to the member while the pool data //! structures have bounded datum. //! //! ### Bonded pool @@ -118,9 +148,9 @@ //! unbonding. The total points of a bonded pool are always equal to the sum of points of the //! delegation members. A bonded pool tracks its points and reads its bonded balance. //! -//! When a member joins a pool, `amount_transferred` is transferred from the members account -//! to the bonded pools account. Then the pool calls `staking::bond_extra(amount_transferred)` and -//! issues new points which are tracked by the member and added to the bonded pool's points. +//! When a member joins a pool, `amount_transferred` is transferred from the members account to the +//! bonded pools account. Then the pool calls `staking::bond_extra(amount_transferred)` and issues +//! new points which are tracked by the member and added to the bonded pool's points. //! //! When the pool already has some balance, we want the value of a point before the transfer to //! equal the value of a point after the transfer. So, when a member joins a bonded pool with a @@ -148,77 +178,13 @@ //! ### Reward pool //! //! When a pool is first bonded it sets up an deterministic, inaccessible account as its reward -//! destination. To track staking rewards we track how the balance of this reward account changes. -//! -//! The reward pool needs to store: -//! -//! * The pool balance at the time of the last payout: `reward_pool.balance` -//! * The total earnings ever at the time of the last payout: `reward_pool.total_earnings` -//! * The total points in the pool at the time of the last payout: `reward_pool.points` -//! -//! And the member needs to store: -//! -//! * The total payouts at the time of the last payout by that member: -//! `member.reward_pool_total_earnings` -//! -//! Before the first reward claim is initiated for a pool, all the above variables are set to zero. -//! -//! 
When a member initiates a claim, the following happens: -//! -//! 1) Compute the reward pool's total points and the member's virtual points in the reward pool -//! * First `current_total_earnings` is computed (`current_balance` is the free balance of the -//! reward pool at the beginning of these operations.) -//! ```text -//! current_total_earnings = -//! current_balance - reward_pool.balance + pool.total_earnings; -//! ``` -//! * Then the `current_points` is computed. Every balance unit that was added to the reward -//! pool since last time recorded means that the `pool.points` is increased by -//! `bonding_pool.total_points`. In other words, for every unit of balance that has been -//! earned by the reward pool, the reward pool points are inflated by `bonded_pool.points`. In -//! effect this allows each, single unit of balance (e.g. planck) to be divvied up pro-rata -//! among members based on points. -//! ```text -//! new_earnings = current_total_earnings - reward_pool.total_earnings; -//! current_points = reward_pool.points + bonding_pool.points * new_earnings; -//! ``` -//! * Finally, the`member_virtual_points` are computed: the product of the member's points in -//! the bonding pool and the total inflow of balance units since the last time the member -//! claimed rewards -//! ```text -//! new_earnings_since_last_claim = current_total_earnings - member.reward_pool_total_earnings; -//! member_virtual_points = member.points * new_earnings_since_last_claim; -//! ``` -//! 2) Compute the `member_payout`: -//! ```text -//! member_pool_point_ratio = member_virtual_points / current_points; -//! member_payout = current_balance * member_pool_point_ratio; -//! ``` -//! 3) Transfer `member_payout` to the member -//! 4) For the member set: -//! ```text -//! member.reward_pool_total_earnings = current_total_earnings; -//! ``` -//! 5) For the pool set: -//! ```text -//! reward_pool.points = current_points - member_virtual_points; -//! reward_pool.balance = current_balance - member_payout; -//! reward_pool.total_earnings = current_total_earnings; -//! ``` -//! -//! _Note_: One short coming of this design is that new joiners can claim rewards for the era after -//! they join even though their funds did not contribute to the pools vote weight. When a -//! member joins, it's `reward_pool_total_earnings` field is set equal to the `total_earnings` -//! of the reward pool at that point in time. At best the reward pool has the rewards up through the -//! previous era. If a member joins prior to the election snapshot it will benefit from the -//! rewards for the active era despite not contributing to the pool's vote weight. If it joins -//! after the election snapshot is taken it will benefit from the rewards of the next _2_ eras -//! because it's vote weight will not be counted until the election snapshot in active era + 1. -//! Related: -// _Note to maintainers_: In order to ensure the reward account never falls below the existential -// deposit, at creation the reward account must be endowed with the existential deposit. All logic -// for calculating rewards then does not see that existential deposit as part of the free balance. -// See `RewardPool::current_balance`. +//! destination. +//! +//! The reward pool is not really a pool anymore, as it does not track points anymore. Instead, it +//! tracks, a virtual value called `reward_counter`, among a few other values. +//! +//! See [this link](https://hackmd.io/PFGn6wI5TbCmBYoEA_f2Uw) for an in-depth explanation of the +//! reward pool mechanism. //! //! 
**Relevant extrinsics:** //! @@ -299,11 +265,6 @@ //! funds via vote splitting. //! * PoolMembers cannot quickly transfer to another pool if they do no like nominations, instead //! they must wait for the unbonding duration. -//! -//! # Runtime builder warnings -//! -//! * Watch out for overflow of [`RewardPoints`] and [`BalanceOf`] types. Consider things like the -//! chains total issuance, staking reward rate, and burn rate. #![cfg_attr(not(feature = "std"), no_std)] @@ -316,11 +277,17 @@ use frame_support::{ Currency, Defensive, DefensiveOption, DefensiveResult, DefensiveSaturating, ExistenceRequirement, Get, }, - CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, + transactional, CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_core::U256; -use sp_runtime::traits::{AccountIdConversion, Bounded, CheckedSub, Convert, Saturating, Zero}; +use sp_runtime::{ + traits::{ + AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, + Zero, + }, + FixedPointNumber, FixedPointOperand, +}; use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; @@ -352,13 +319,13 @@ pub use weights::WeightInfo; /// The balance type used by the currency system. pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -/// Type used to track the points of a reward pool. -pub type RewardPoints = U256; /// Type used for unique identifier of each pool. pub type PoolId = u32; type UnbondingPoolsWithEra = BoundedBTreeMap, TotalUnbondingPools>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + pub const POINTS_TO_BALANCE_INIT_RATIO: u32 = 1; /// Possible operations on the configuration values of this pallet. @@ -407,26 +374,50 @@ pub struct PoolMember { /// The quantity of points this member has in the bonded pool or in a sub pool if /// `Self::unbonding_era` is some. pub points: BalanceOf, - /// The reward pools total earnings _ever_ the last time this member claimed a payout. - /// Assuming no massive burning events, we expect this value to always be below total issuance. - /// This value lines up with the [`RewardPool::total_earnings`] after a member claims a - /// payout. - pub reward_pool_total_earnings: BalanceOf, + /// The reward counter at the time of this member's last payout claim. + pub last_recorded_reward_counter: T::RewardCounter, /// The eras in which this member is unbonding, mapped from era index to the number of /// points scheduled to unbond in the given era. pub unbonding_eras: BoundedBTreeMap, T::MaxUnbonding>, } impl PoolMember { - fn total_points(&self) -> BalanceOf { - self.active_points().saturating_add(self.unbonding_points()) + /// The pending rewards of this member. + fn pending_rewards( + &self, + current_reward_counter: T::RewardCounter, + ) -> Result, Error> { + // accuracy note: Reward counters are `FixedU128` with base of 10^18. This value is being + // multiplied by a point. The worse case of a point is 10x the granularity of the balance + // (10x is the common configuration of `MaxPointsToBalance`). + // + // Assuming roughly the current issuance of polkadot (12,047,781,394,999,601,455, which is + // 1.2 * 10^9 * 10^10 = 1.2 * 10^19), the worse case point value is around 10^20. + // + // The final multiplication is: + // + // rc * 10^20 / 10^18 = rc * 100 + // + // meaning that as long as reward_counter's value is less than 1/100th of its max capacity + // (u128::MAX_VALUE), `checked_mul_int` won't saturate. 
+ // + // given the nature of reward counter being 'pending_rewards / pool_total_point', the only + // (unrealistic) way that super high values can be achieved is for a pool to suddenly + // receive massive rewards with a very very small amount of stake. In all normal pools, as + // the points increase, so does the rewards. Moreover, as long as rewards are not + // accumulated for astronomically large durations, + // `current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)` + // won't be extremely big. + (current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)) + .checked_mul_int(self.active_points()) + .ok_or(Error::::OverflowRisk) } /// Active balance of the member. /// /// This is derived from the ratio of points in the pool to which the member belongs to. /// Might return different values based on the pool state for the same member and points. - pub(crate) fn active_balance(&self) -> BalanceOf { + fn active_balance(&self) -> BalanceOf { if let Some(pool) = BondedPool::::get(self.pool_id).defensive() { pool.points_to_balance(self.points) } else { @@ -434,13 +425,18 @@ impl PoolMember { } } + /// Total points of this member, both active and unbonding. + fn total_points(&self) -> BalanceOf { + self.active_points().saturating_add(self.unbonding_points()) + } + /// Active points of the member. - pub(crate) fn active_points(&self) -> BalanceOf { + fn active_points(&self) -> BalanceOf { self.points } /// Inactive points of the member, waiting to be withdrawn. - pub(crate) fn unbonding_points(&self) -> BalanceOf { + fn unbonding_points(&self) -> BalanceOf { self.unbonding_eras .as_ref() .iter() @@ -478,7 +474,7 @@ impl PoolMember { self.points = new_points; Ok(()) } else { - Err(Error::::NotEnoughPointsToUnbond) + Err(Error::::MinimumBondNotMet) } } @@ -500,7 +496,7 @@ impl PoolMember { true } else { removed_points - .try_insert(*e, p.clone()) + .try_insert(*e, *p) .expect("source map is bounded, this is a subset, will be bounded; qed"); false } @@ -673,7 +669,7 @@ impl BondedPool { MaxPoolMembers::::get().map_or(true, |max| PoolMembers::::count() < max), Error::::MaxPoolMembers ); - self.member_counter = self.member_counter.defensive_saturating_add(1); + self.member_counter = self.member_counter.checked_add(1).ok_or(Error::::OverflowRisk)?; Ok(()) } @@ -750,19 +746,19 @@ impl BondedPool { // We checked for zero above .div(bonded_balance); - let min_points_to_balance = T::MinPointsToBalance::get(); + let max_points_to_balance = T::MaxPointsToBalance::get(); // Pool points can inflate relative to balance, but only if the pool is slashed. // If we cap the ratio of points:balance so one cannot join a pool that has been slashed - // by `min_points_to_balance`%, if not zero. + // by `max_points_to_balance`%, if not zero. 
@@ -750,19 +746,19 @@ impl<T: Config> BondedPool<T> {
 			// We checked for zero above
 			.div(bonded_balance);

-		let min_points_to_balance = T::MinPointsToBalance::get();
+		let max_points_to_balance = T::MaxPointsToBalance::get();

 		// Pool points can inflate relative to balance, but only if the pool is slashed.
 		// We cap the ratio of points:balance so one cannot join a pool that has been slashed
-		// by `min_points_to_balance`%, if not zero.
+		// by `max_points_to_balance`%, if not zero.
 		ensure!(
-			points_to_balance_ratio_floor < min_points_to_balance.into(),
+			points_to_balance_ratio_floor < max_points_to_balance.into(),
 			Error::<T>::OverflowRisk
 		);
-		// while restricting the balance to `min_points_to_balance` of max total issuance,
+		// while restricting the balance to `max_points_to_balance` of max total issuance,
 		let next_bonded_balance = bonded_balance.saturating_add(new_funds);
 		ensure!(
-			next_bonded_balance < BalanceOf::<T>::max_value().div(min_points_to_balance.into()),
+			next_bonded_balance < BalanceOf::<T>::max_value().div(max_points_to_balance.into()),
 			Error::<T>::OverflowRisk
 		);

@@ -790,45 +786,60 @@ impl<T: Config> BondedPool<T> {
 		let is_depositor = *target_account == self.roles.depositor;
 		let is_full_unbond = unbonding_points == target_member.active_points();

+		let balance_after_unbond = {
+			let new_depositor_points =
+				target_member.active_points().saturating_sub(unbonding_points);
+			let mut target_member_after_unbond = (*target_member).clone();
+			target_member_after_unbond.points = new_depositor_points;
+			target_member_after_unbond.active_balance()
+		};
+
 		// any partial unbonding is only ever allowed if this unbond is permissioned.
 		ensure!(
 			is_permissioned || is_full_unbond,
 			Error::<T>::PartialUnbondNotAllowedPermissionlessly
 		);

+		// any unbond must comply with the balance condition:
+		ensure!(
+			is_full_unbond ||
+				balance_after_unbond >=
+					if is_depositor {
+						Pallet::<T>::depositor_min_bond()
+					} else {
+						MinJoinBond::<T>::get()
+					},
+			Error::<T>::MinimumBondNotMet
+		);
+
+		// additional checks:
 		match (is_permissioned, is_depositor) {
-			// If the pool is blocked, then an admin with kicking permissions can remove a
-			// member. If the pool is being destroyed, anyone can remove a member
+			(true, false) => (),
+			(true, true) => {
+				// permissioned depositor unbond: if destroying and pool is empty, always allowed,
+				// with no additional limits.
+				if self.is_destroying_and_only_depositor(target_member.active_points()) {
+					// everything good, let them unbond anything.
+				} else {
+					// depositor cannot fully unbond yet.
+					ensure!(!is_full_unbond, Error::<T>::MinimumBondNotMet);
+				}
+			},
 			(false, false) => {
+				// If the pool is blocked, then an admin with kicking permissions can remove a
+				// member. If the pool is being destroyed, anyone can remove a member.
+				debug_assert!(is_full_unbond);
 				ensure!(
 					self.can_kick(caller) || self.is_destroying(),
 					Error::<T>::NotKickerOrDestroying
 				)
 			},
-			// Any member who is not the depositor can always unbond themselves
-			(true, false) => (),
-			(_, true) => {
-				if self.is_destroying_and_only_depositor(target_member.active_points()) {
-					// if the pool is about to be destroyed, anyone can unbond the depositor, and
-					// they can fully unbond.
-				} else {
-					// only the depositor can partially unbond, and they can only unbond up to the
-					// threshold.
-					ensure!(is_permissioned, Error::<T>::DoesNotHavePermission);
-					let balance_after_unbond = {
-						let new_depositor_points =
-							target_member.active_points().saturating_sub(unbonding_points);
-						let mut depositor_after_unbond = (*target_member).clone();
-						depositor_after_unbond.points = new_depositor_points;
-						depositor_after_unbond.active_balance()
-					};
-					ensure!(
-						balance_after_unbond >= MinCreateBond::<T>::get(),
-						Error::<T>::NotOnlyPoolMember
-					);
-				}
+			(false, true) => {
+				// the depositor can simply not be unbonded permissionlessly, period.
+				return Err(Error::<T>::DoesNotHavePermission.into())
 			},
 		};
+
 		Ok(())
 	}

@@ -839,25 +850,14 @@ impl<T: Config> BondedPool<T> {
 		&self,
 		caller: &T::AccountId,
 		target_account: &T::AccountId,
-		target_member: &PoolMember<T>,
-		sub_pools: &SubPools<T>,
 	) -> Result<(), DispatchError> {
-		if *target_account == self.roles.depositor {
-			ensure!(
-				sub_pools.sum_unbonding_points() == target_member.unbonding_points(),
-				Error::<T>::NotOnlyPoolMember
-			);
-			debug_assert_eq!(self.member_counter, 1, "only member must exist at this point");
-			Ok(())
-		} else {
-			// This isn't a depositor
-			let is_permissioned = caller == target_account;
-			ensure!(
-				is_permissioned || self.can_kick(caller) || self.is_destroying(),
-				Error::<T>::NotKickerOrDestroying
-			);
-			Ok(())
-		}
+		// This isn't a depositor
+		let is_permissioned = caller == target_account;
+		ensure!(
+			is_permissioned || self.can_kick(caller) || self.is_destroying(),
+			Error::<T>::NotKickerOrDestroying
+		);
+		Ok(())
 	}

 	/// Bond exactly `amount` from `who`'s funds into this pool.
@@ -904,16 +904,6 @@ impl<T: Config> BondedPool<T> {
 		Ok(points_issued)
 	}

-	/// If `n` saturates at it's upper bound, mark the pool as destroying. This is useful when a
-	/// number saturating indicates the pool can no longer correctly keep track of state.
-	fn bound_check(&mut self, n: U256) -> U256 {
-		if n == U256::max_value() {
-			self.set_state(PoolState::Destroying)
-		}
-
-		n
-	}
-
 	// Set the state of `self`, and deposit an event if the state changed. State should never be set
 	// directly in order to ensure a state change event is always correctly deposited.
 	fn set_state(&mut self, state: PoolState) {
@@ -928,45 +918,104 @@
 }

 /// A reward pool.
+///
+/// A reward pool is not so much a pool anymore, since it does not contain any shares or points.
+/// Rather, it keeps the name simply to fit nicely next to the bonded pool and unbonding pools in
+/// terms of terminology. In reality, a reward pool is just a container for a few pieces of
+/// pool-dependent data related to rewards.
 #[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq))]
+#[cfg_attr(feature = "std", derive(Clone, PartialEq, DefaultNoBound))]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct RewardPool<T: Config> {
-	/// The balance of this reward pool after the last claimed payout.
-	pub balance: BalanceOf<T>,
-	/// The total earnings _ever_ of this reward pool after the last claimed payout. I.E. the sum
-	/// of all incoming balance through the pools life.
+	/// The last recorded value of the reward counter.
+	///
+	/// This is updated ONLY when the points in the bonded pool change, which means `join`,
+	/// `bond_extra` and `unbond`, all of which are done through `update_records`.
+	last_recorded_reward_counter: T::RewardCounter,
+	/// The last recorded total payouts of the reward pool.
 	///
-	/// NOTE: We assume this will always be less than total issuance and thus can use the runtimes
-	/// `Balance` type. However in a chain with a burn rate higher than the rate this increases,
-	/// this type should be bigger than `Balance`.
-	pub total_earnings: BalanceOf<T>,
-	/// The total points of this reward pool after the last claimed payout.
-	pub points: RewardPoints,
+	/// Payouts are essentially the income of the pool.
+	///
+	/// The update criteria are the same as for `last_recorded_reward_counter`.
+	last_recorded_total_payouts: BalanceOf<T>,
+	/// Total amount that this pool has paid out so far to the members.
+	total_rewards_claimed: BalanceOf<T>,
 }

 impl<T: Config> RewardPool<T> {
-	/// Mutate the reward pool by updating the total earnings and current free balance.
-	fn update_total_earnings_and_balance(&mut self, id: PoolId) {
-		let current_balance = Self::current_balance(id);
-		// The earnings since the last time it was updated
-		let new_earnings = current_balance.saturating_sub(self.balance);
-		// The lifetime earnings of the of the reward pool
-		self.total_earnings = new_earnings.saturating_add(self.total_earnings);
-		self.balance = current_balance;
-	}
-
-	/// Get a reward pool and update its total earnings and balance
-	fn get_and_update(id: PoolId) -> Option<Self> {
-		RewardPools::<T>::get(id).map(|mut r| {
-			r.update_total_earnings_and_balance(id);
-			r
-		})
-	}
-
-	/// The current balance of the reward pool. Never access the reward pools free balance directly.
-	/// The existential deposit was not received as a reward, so the reward pool can not use it.
+	/// Getter for [`RewardPool::last_recorded_reward_counter`].
+	pub(crate) fn last_recorded_reward_counter(&self) -> T::RewardCounter {
+		self.last_recorded_reward_counter
+	}
+
+	/// Register some rewards that are claimed from the pool by the members.
+	fn register_claimed_reward(&mut self, reward: BalanceOf<T>) {
+		self.total_rewards_claimed = self.total_rewards_claimed.saturating_add(reward);
+	}
+
+	/// Update the recorded values of the pool.
+	fn update_records(&mut self, id: PoolId, bonded_points: BalanceOf<T>) -> Result<(), Error<T>> {
+		let balance = Self::current_balance(id);
+		self.last_recorded_reward_counter = self.current_reward_counter(id, bonded_points)?;
+		self.last_recorded_total_payouts = balance
+			.checked_add(&self.total_rewards_claimed)
+			.ok_or(Error::<T>::OverflowRisk)?;
+		Ok(())
+	}
+
+	/// Get the current reward counter, based on the given `bonded_points` being the state of the
+	/// bonded pool at this time.
+	fn current_reward_counter(
+		&self,
+		id: PoolId,
+		bonded_points: BalanceOf<T>,
+	) -> Result<T::RewardCounter, Error<T>> {
+		let balance = Self::current_balance(id);
+		let payouts_since_last_record = balance
+			.saturating_add(self.total_rewards_claimed)
+			.saturating_sub(self.last_recorded_total_payouts);
+
+		// * accuracy notes regarding the multiplication in `checked_from_rational`:
+		// `payouts_since_last_record` is a subset of the total_issuance at the very worst.
+		// `bonded_points`, similarly, have (in a non-slashed pool) the same granularity as
+		// balance, and are thus also within the range of total_issuance. In the worst-case
+		// scenario for `checked_from_rational`, we have:
+		//
+		// dot_total_issuance * 10^18 / `minJoinBond`
+		//
+		// assuming `MinJoinBond == ED`
+		//
+		// dot_total_issuance * 10^18 / 10^10 = dot_total_issuance * 10^8
+		//
+		// which, with the current numbers, is a minuscule fraction of the u128 capacity.
+		//
+		// Thus, adding two values of type reward counter should be safe for ages in a chain like
+		// Polkadot. The important note here is that `reward_pool.last_recorded_reward_counter`
+		// only ever accumulates, but its semantics imply that it is less than total_issuance when
+		// represented as `FixedU128`, which means it is less than `total_issuance * 10^18`.
+		//
+		// * accuracy notes regarding `checked_from_rational` collapsing to zero, meaning that no
+		// reward can be claimed:
+		//
+		// The largest `bonded_points`, such that the reward counter is non-zero with `FixedU128`,
+		// will be when the payout is being computed. This essentially means `payout/bonded_points`
+		// needs to be more than 1/10^18.
+		// Thus, assuming that `bonded_points` will always be less than `10 * dot_total_issuance`,
+		// if the reward_counter is the smallest possible value, the value of the reward being
+		// calculated is:
+		//
+		// x / 10^20 = 1 / 10^18
+		//
+		// x = 100
+		//
+		// which is basically 10^-8 DOTs. See `smallest_claimable_reward` for an example of this.
+		T::RewardCounter::checked_from_rational(payouts_since_last_record, bonded_points)
+			.and_then(|ref r| self.last_recorded_reward_counter.checked_add(r))
+			.ok_or(Error::<T>::OverflowRisk)
+	}
+
+	/// Current free balance of the reward pool.
+	///
+	/// This is the sum of all the rewards that are claimable by pool members.
 	fn current_balance(id: PoolId) -> BalanceOf<T> {
 		T::Currency::free_balance(&Pallet::<T>::create_reward_account(id))
 			.saturating_sub(T::Currency::minimum_balance())
@@ -1060,15 +1109,6 @@ impl<T: Config> SubPools<T> {
 		self
 	}

-	/// The sum of all unbonding points, regardless of whether they are actually unlocked or not.
-	fn sum_unbonding_points(&self) -> BalanceOf<T> {
-		self.no_era.points.saturating_add(
-			self.with_era
-				.values()
-				.fold(BalanceOf::<T>::zero(), |acc, pool| acc.saturating_add(pool.points)),
-		)
-	}
-
 	/// The sum of all unbonding balance, regardless of whether they are actually unlocked or not.
 	#[cfg(any(test, debug_assertions))]
 	fn sum_unbonding_balance(&self) -> BalanceOf<T> {
@@ -1098,9 +1138,10 @@ pub mod pallet {
 	use super::*;
 	use frame_support::traits::StorageVersion;
 	use frame_system::{ensure_signed, pallet_prelude::*};
+	use sp_runtime::traits::CheckedAdd;

 	/// The current storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);

 	#[pallet::pallet]
 	#[pallet::generate_store(pub(crate) trait Store)]
@@ -1116,19 +1157,53 @@ pub mod pallet {
 		type WeightInfo: weights::WeightInfo;

 		/// The nominating balance.
-		type Currency: Currency<Self::AccountId>;
+		type Currency: Currency<Self::AccountId, Balance = Self::CurrencyBalance>;
+
+		/// Sadly needed to bound it to `FixedPointOperand`.
+		// The only alternative is to sprinkle a `where BalanceOf<T>: FixedPointOperand` in roughly
+		// a million places, so we prefer doing this.
+		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
+			+ codec::FullCodec
+			+ MaybeSerializeDeserialize
+			+ sp_std::fmt::Debug
+			+ Default
+			+ FixedPointOperand
+			+ CheckedAdd
+			+ TypeInfo
+			+ MaxEncodedLen;
+
+		/// The type that is used for reward counter.
+		///
+		/// The arithmetic of the reward counter might saturate based on the size of the
+		/// `Currency::Balance`. If this happens, operations fail. Nonetheless, this type should be
+		/// chosen such that this failure almost never happens, as, if it happens, the pool
+		/// basically needs to be dismantled (or all pools migrated to a larger `RewardCounter`
+		/// type, which is a PITA to do).
+		///
+		/// See the inline code docs of `Member::pending_rewards` and `RewardPool::update_records`
+		/// for example analysis. A [`sp_runtime::FixedU128`] should be fine for chains with
+		/// balance types similar to that of Polkadot and Kusama, in the absence of severe slashing
+		/// (or prevented via a reasonable `MaxPointsToBalance`), for many, many years to come.
+		type RewardCounter: FixedPointNumber + MaxEncodedLen + TypeInfo + Default + codec::FullCodec;

 		/// The nomination pool's pallet id.
 		#[pallet::constant]
 		type PalletId: Get<frame_support::PalletId>;
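Aside: the headroom claim in the accuracy notes above can be checked numerically. The following standalone sketch uses the same Polkadot-like figures the comments assume (illustrative arithmetic only, not pallet code):

```rust
fn main() {
    let base: u128 = 10u128.pow(18); // FixedU128 accuracy
    let dot_total_issuance: u128 = 12_047_781_394_999_601_455;
    let min_join_bond: u128 = 10u128.pow(10); // assume MinJoinBond == ED (1 DOT)

    // Worst-case single increment of the reward counter, as a raw FixedU128 value:
    // total_issuance / min_join_bond, scaled by 10^18.
    let worst_case_rc = dot_total_issuance / min_join_bond * base;

    // This is ~1.2 * 10^27, a minuscule fraction of u128::MAX (~3.4 * 10^38),
    // leaving room for roughly 10^11 such worst-case accumulations before overflow.
    assert!(worst_case_rc < u128::MAX / 100_000_000_000);
}
```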
-		/// The minimum pool points-to-balance ratio that must be maintained for it to be `open`.
+		/// The maximum pool points-to-balance ratio that an `open` pool can have.
+		///
 		/// This is important in the event slashing takes place and the pool's points-to-balance
 		/// ratio becomes disproportional.
+		///
+		/// Moreover, this relates to the `RewardCounter` type as well, as the arithmetic
+		/// operations are a function of the number of points, and by setting this value to e.g.
+		/// 10, you ensure that the total number of points in the system is at most 10 times the
+		/// total_issuance of the chain, in the absolute worst case.
+		///
 		/// For a value of 10, the threshold would be a pool points-to-balance ratio of 10:1.
 		/// Such a scenario would also be the equivalent of the pool being 90% slashed.
 		#[pallet::constant]
-		type MinPointsToBalance: Get<u32>;
+		type MaxPointsToBalance: Get<u8>;

 		/// Infallible method for converting `Currency::Balance` to `U256`.
 		type BalanceToU256: Convert<BalanceOf<Self>, U256>;
@@ -1279,7 +1354,7 @@ pub mod pallet {
 		/// pool.
 		/// - `points` is the number of points that are issued as a result of `balance` being
 		/// dissolved into the corresponding unbonding pool.
-		///
+		/// - `era` is the era in which the balance will be unbonded.
 		/// In the absence of slashing, these values will match. In the presence of slashing, the
 		/// number of points that are issued in the unbonding pool will be less than the amount
 		/// requested to be unbonded.
@@ -1288,6 +1363,7 @@ pub mod pallet {
 			pool_id: PoolId,
 			balance: BalanceOf<T>,
 			points: BalanceOf<T>,
+			era: EraIndex,
 		},
 		/// A member has withdrawn from their pool.
 		///
@@ -1344,15 +1420,16 @@ pub mod pallet {
 		/// None of the funds can be withdrawn yet because the bonding duration has not passed.
 		CannotWithdrawAny,
 		/// The amount does not meet the minimum bond to either join or create a pool.
+		///
+		/// The depositor can never unbond to a value less than `Pallet::depositor_min_bond`.
+		/// Members can never unbond to a value below `MinJoinBond`.
 		MinimumBondNotMet,
 		/// The transaction could not be executed due to overflow risk for the pool.
 		OverflowRisk,
 		/// A pool must be in [`PoolState::Destroying`] in order for the depositor to unbond or for
 		/// other members to be permissionlessly unbonded.
 		NotDestroying,
-		/// The depositor must be the only member in the bonded pool in order to unbond. And the
-		/// depositor must be the only member in the sub pools in order to withdraw unbonded.
-		NotOnlyPoolMember,
 		/// The caller does not have nominating permissions for the pool.
 		NotNominator,
 		/// Either a) the caller cannot make a valid kick or b) the pool is not destroying.
@@ -1372,8 +1449,6 @@ pub mod pallet {
 		/// Some error occurred that should never happen. This should be reported to the
 		/// maintainers.
 		Defensive(DefensiveError),
-		/// Not enough points. Ty unbonding less.
-		NotEnoughPointsToUnbond,
 		/// Partial unbonding not allowed permissionlessly.
 		PartialUnbondNotAllowedPermissionlessly,
 	}
@@ -1412,6 +1487,7 @@ pub mod pallet {
 		/// `existential deposit + amount` in their account.
 		/// * Only a pool with [`PoolState::Open`] can be joined
 		#[pallet::weight(T::WeightInfo::join())]
+		#[transactional]
 		pub fn join(
 			origin: OriginFor<T>,
 			#[pallet::compact] amount: BalanceOf<T>,
@@ -1426,10 +1502,10 @@ pub mod pallet {
 			let mut bonded_pool = BondedPool::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
 			bonded_pool.ok_to_join(amount)?;

-			// We just need its total earnings at this point in time, but we don't need to write it
-			// because we are not adjusting its points (all other values can calculated virtual).
-			let reward_pool = RewardPool::<T>::get_and_update(pool_id)
+			let mut reward_pool = RewardPools::<T>::get(pool_id)
 				.defensive_ok_or::<Error<T>>(DefensiveError::RewardPoolNotFound.into())?;
+			// IMPORTANT: reward pool records must be updated with the old points.
+			reward_pool.update_records(pool_id, bonded_pool.points)?;

 			bonded_pool.try_inc_members()?;
 			let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Later)?;
@@ -1439,13 +1515,9 @@ pub mod pallet {
 				PoolMember::<T> {
 					pool_id,
 					points: points_issued,
-					// At best the reward pool has the rewards up through the previous era. If the
-					// member joins prior to the snapshot they will benefit from the rewards of
-					// the active era despite not contributing to the pool's vote weight. If they
-					// join after the snapshot is taken they will benefit from the rewards of the
-					// next 2 eras because their vote weight will not be counted until the
-					// snapshot in active era + 1.
-					reward_pool_total_earnings: reward_pool.total_earnings,
+					// we just updated `last_recorded_reward_counter` to the current one in
+					// `update_records`.
+					last_recorded_reward_counter: reward_pool.last_recorded_reward_counter(),
 					unbonding_eras: Default::default(),
 				},
 			);
@@ -1456,7 +1528,9 @@ pub mod pallet {
 				bonded: amount,
 				joined: true,
 			});
+
 			bonded_pool.put();
+			RewardPools::<T>::insert(pool_id, reward_pool);

 			Ok(())
 		}
@@ -1465,6 +1539,8 @@ pub mod pallet {
 		///
 		/// Additional funds can come from either the free balance of the account, or from the
 		/// accumulated rewards, see [`BondExtra`].
+		///
+		/// Bonding extra funds implies an automatic payout of all pending rewards as well.
 		// NOTE: this transaction is implemented with the sole purpose of readability and
 		// correctness, not optimization. We read/write several storage items multiple times instead
 		// of just once, in the spirit of reusing code.
@@ -1472,23 +1548,28 @@ pub mod pallet {
 			T::WeightInfo::bond_extra_transfer()
 			.max(T::WeightInfo::bond_extra_reward())
 		)]
+		#[transactional]
 		pub fn bond_extra(origin: OriginFor<T>, extra: BondExtra<BalanceOf<T>>) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 			let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?;

+			// payout-related: we must claim the payouts and update the recorded payout data
+			// before updating the bonded pool points, similar to the `join` transaction.
+			reward_pool.update_records(bonded_pool.id, bonded_pool.points)?;
+			// TODO: optimize this to not touch the free balance of `who` at all in benchmarks.
+			// Currently, bonding rewards is like a batch. In the same PR, also make this function
+			// take a boolean argument that makes it either 100% pure (no storage update), or make
+			// it also emit event and do the transfer. #11671
+			let claimed =
+				Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?;
+
 			let (points_issued, bonded) = match extra {
 				BondExtra::FreeBalance(amount) =>
 					(bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount),
-				BondExtra::Rewards => {
-					let claimed = Self::do_reward_payout(
-						&who,
-						&mut member,
-						&mut bonded_pool,
-						&mut reward_pool,
-					)?;
-					(bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed)
-				},
+				BondExtra::Rewards =>
+					(bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed),
 			};
+
 			bonded_pool.ok_to_be_open(bonded)?;
 			member.points = member.points.saturating_add(points_issued);
@@ -1510,6 +1591,7 @@ pub mod pallet {
 		/// The member will earn rewards pro rata based on the member's stake vs the sum of the
Rewards do not "expire". #[pallet::weight(T::WeightInfo::claim_payout())] + #[transactional] pub fn claim_payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?; @@ -1549,26 +1631,24 @@ pub mod pallet { /// there are too many unlocking chunks, the result of this call will likely be the /// `NoMoreChunks` error from the staking system. #[pallet::weight(T::WeightInfo::unbond())] + #[transactional] pub fn unbond( origin: OriginFor, - member_account: T::AccountId, + member_account: AccountIdLookupOf, #[pallet::compact] unbonding_points: BalanceOf, ) -> DispatchResult { - let caller = ensure_signed(origin)?; + let who = ensure_signed(origin)?; + let member_account = T::Lookup::lookup(member_account)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&member_account)?; - bonded_pool.ok_to_unbond_with(&caller, &member_account, &member, unbonding_points)?; + bonded_pool.ok_to_unbond_with(&who, &member_account, &member, unbonding_points)?; // Claim the the payout prior to unbonding. Once the user is unbonding their points no // longer exist in the bonded pool and thus they can no longer claim their payouts. It // is not strictly necessary to claim the rewards, but we do it here for UX. - Self::do_reward_payout( - &member_account, - &mut member, - &mut bonded_pool, - &mut reward_pool, - )?; + let _ = reward_pool.update_records(bonded_pool.id, bonded_pool.points)?; + let _ = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; let current_era = T::StakingInterface::current_era(); let unbond_era = T::StakingInterface::bonding_duration().saturating_add(current_era); @@ -1610,6 +1690,7 @@ pub mod pallet { pool_id: member.pool_id, points: points_unbonded, balance: unbonding_balance, + era: unbond_era, }); // Now that we know everything has worked write the items to storage. @@ -1626,6 +1707,7 @@ pub mod pallet { /// would probably see an error like `NoMoreChunks` emitted from the staking system when /// they attempt to unbond. #[pallet::weight(T::WeightInfo::pool_withdraw_unbonded(*num_slashing_spans))] + #[transactional] pub fn pool_withdraw_unbonded( origin: OriginFor, pool_id: PoolId, @@ -1662,12 +1744,14 @@ pub mod pallet { #[pallet::weight( T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans) )] + #[transactional] pub fn withdraw_unbonded( origin: OriginFor, - member_account: T::AccountId, + member_account: AccountIdLookupOf, num_slashing_spans: u32, ) -> DispatchResultWithPostInfo { let caller = ensure_signed(origin)?; + let member_account = T::Lookup::lookup(member_account)?; let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; let current_era = T::StakingInterface::current_era(); @@ -1677,12 +1761,7 @@ pub mod pallet { let mut sub_pools = SubPoolsStorage::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::SubPoolsNotFound.into())?; - bonded_pool.ok_to_withdraw_unbonded_with( - &caller, - &member_account, - &member, - &sub_pools, - )?; + bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; // NOTE: must do this after we have done the `ok_to_withdraw_unbonded_other_with` check. let withdrawn_points = member.withdraw_unlocked(current_era); @@ -1787,22 +1866,20 @@ pub mod pallet { /// In addition to `amount`, the caller will transfer the existential deposit; so the caller /// needs at have at least `amount + existential_deposit` transferrable. 
@@ -1787,22 +1866,20 @@ pub mod pallet {
 		/// In addition to `amount`, the caller will transfer the existential deposit; so the caller
 		/// needs to have at least `amount + existential_deposit` transferrable.
 		#[pallet::weight(T::WeightInfo::create())]
+		#[transactional]
 		pub fn create(
 			origin: OriginFor<T>,
 			#[pallet::compact] amount: BalanceOf<T>,
-			root: T::AccountId,
-			nominator: T::AccountId,
-			state_toggler: T::AccountId,
+			root: AccountIdLookupOf<T>,
+			nominator: AccountIdLookupOf<T>,
+			state_toggler: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let who = ensure_signed(origin)?;
+			let root = T::Lookup::lookup(root)?;
+			let nominator = T::Lookup::lookup(nominator)?;
+			let state_toggler = T::Lookup::lookup(state_toggler)?;

-			ensure!(
-				amount >=
-					T::StakingInterface::minimum_bond()
-						.max(MinCreateBond::<T>::get())
-						.max(MinJoinBond::<T>::get()),
-				Error::<T>::MinimumBondNotMet
-			);
+			ensure!(amount >= Pallet::<T>::depositor_min_bond(), Error::<T>::MinimumBondNotMet);
 			ensure!(
 				MaxPools::<T>::get()
 					.map_or(true, |max_pools| BondedPools::<T>::count() < max_pools),
@@ -1839,23 +1916,22 @@ pub mod pallet {
 				PoolMember::<T> {
 					pool_id,
 					points,
-					reward_pool_total_earnings: Zero::zero(),
+					last_recorded_reward_counter: Zero::zero(),
 					unbonding_eras: Default::default(),
 				},
 			);
 			RewardPools::<T>::insert(
 				pool_id,
 				RewardPool::<T> {
-					balance: Zero::zero(),
-					points: U256::zero(),
-					total_earnings: Zero::zero(),
+					last_recorded_reward_counter: Zero::zero(),
+					last_recorded_total_payouts: Zero::zero(),
+					total_rewards_claimed: Zero::zero(),
 				},
 			);
 			ReversePoolIdLookup::<T>::insert(bonded_pool.bonded_account(), pool_id);
-			Self::deposit_event(Event::<T>::Created {
-				depositor: who.clone(),
-				pool_id: pool_id.clone(),
-			});
+
+			Self::deposit_event(Event::<T>::Created { depositor: who.clone(), pool_id });
+
 			Self::deposit_event(Event::<T>::Bonded {
 				member: who,
 				pool_id,
@@ -1875,6 +1951,7 @@ pub mod pallet {
 		/// This directly forwards the call to the staking pallet, on behalf of the pool bonded
 		/// account.
 		#[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))]
+		#[transactional]
 		pub fn nominate(
 			origin: OriginFor<T>,
 			pool_id: PoolId,
@@ -1888,9 +1965,16 @@ pub mod pallet {

 		/// Set a new state for the pool.
 		///
-		/// The dispatch origin of this call must be signed by the state toggler, or the root role
-		/// of the pool.
+		/// If a pool is already in the `Destroying` state, then under no condition can its state
+		/// change again.
+		///
+		/// The dispatch origin of this call must be either:
+		///
+		/// 1. signed by the state toggler, or the root role of the pool,
+		/// 2. if the pool conditions to be open are NOT met (as described by `ok_to_be_open`),
+		///    then the state of the pool can be permissionlessly changed to `Destroying`.
 		#[pallet::weight(T::WeightInfo::set_state())]
+		#[transactional]
 		pub fn set_state(
 			origin: OriginFor<T>,
 			pool_id: PoolId,
@@ -1921,6 +2005,7 @@ pub mod pallet {
 		/// The dispatch origin of this call must be signed by the state toggler, or the root role
 		/// of the pool.
 		#[pallet::weight(T::WeightInfo::set_metadata(metadata.len() as u32))]
+		#[transactional]
 		pub fn set_metadata(
 			origin: OriginFor<T>,
 			pool_id: PoolId,
@@ -1952,6 +2037,7 @@ pub mod pallet {
 		/// * `max_members` - Set [`MaxPoolMembers`].
 		/// * `max_members_per_pool` - Set [`MaxPoolMembersPerPool`].
 		#[pallet::weight(T::WeightInfo::set_configs())]
+		#[transactional]
 		pub fn set_configs(
 			origin: OriginFor<T>,
 			min_join_bond: ConfigOp<BalanceOf<T>>,
@@ -1988,6 +2074,7 @@ pub mod pallet {
 		/// It emits an event, notifying UIs of the role change. This event is quite relevant to
 		/// most pool members and they should be informed of changes to pool roles.
 		#[pallet::weight(T::WeightInfo::update_roles())]
+		#[transactional]
 		pub fn update_roles(
 			origin: OriginFor<T>,
 			pool_id: PoolId,
@@ -2040,6 +2127,7 @@ pub mod pallet {
 		/// This directly forwards the call to the staking pallet, on behalf of the pool bonded
 		/// account.
 		#[pallet::weight(T::WeightInfo::chill())]
+		#[transactional]
 		pub fn chill(origin: OriginFor<T>, pool_id: PoolId) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 			let bonded_pool = BondedPool::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
@@ -2052,14 +2140,9 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn integrity_test() {
 			assert!(
-				T::MinPointsToBalance::get() > 0,
+				T::MaxPointsToBalance::get() > 0,
 				"Maximum points to balance ratio must be greater than 0"
 			);
-			assert!(
-				sp_std::mem::size_of::<RewardPoints>() >=
-					2 * sp_std::mem::size_of::<BalanceOf<T>>(),
-				"bit-length of the reward points must be at least twice as much as balance"
-			);
 			assert!(
 				T::StakingInterface::bonding_duration() < TotalUnbondingPools::<T>::get(),
 				"There must be more unbonding pools than the bonding duration /
@@ -2071,6 +2154,36 @@ pub mod pallet {
 }

 impl<T: Config> Pallet<T> {
+	/// Returns the pending rewards for the specified `member_account`.
+	///
+	/// In the case of error, `None` is returned.
+	pub fn pending_rewards(member_account: T::AccountId) -> Option<BalanceOf<T>> {
+		if let Some(pool_member) = PoolMembers::<T>::get(member_account) {
+			if let Some((reward_pool, bonded_pool)) = RewardPools::<T>::get(pool_member.pool_id)
+				.zip(BondedPools::<T>::get(pool_member.pool_id))
+			{
+				let current_reward_counter = reward_pool
+					.current_reward_counter(pool_member.pool_id, bonded_pool.points)
+					.ok()?;
+				return pool_member.pending_rewards(current_reward_counter).ok()
+			}
+		}
+
+		None
+	}
+
+	/// The amount of bond that MUST REMAIN BONDED in ALL POOLS.
+	///
+	/// It is the responsibility of the depositor to put these funds into the pool initially. Upon
+	/// unbond, they can never unbond to a value below this amount.
+	///
+	/// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the first
+	/// comes from the staking pallet and the latter two are configured in this pallet.
+	fn depositor_min_bond() -> BalanceOf<T> {
+		T::StakingInterface::minimum_bond()
+			.max(MinCreateBond::<T>::get())
+			.max(MinJoinBond::<T>::get())
+	}
+
 	/// Remove everything related to the given bonded pool.
 	///
 	/// All sub-pools are also deleted. All accounts are dusted and the leftover of the reward
@@ -2106,7 +2219,7 @@ impl<T: Config> Pallet<T> {
 			ExistenceRequirement::AllowDeath,
 		);

-		// TODO: this is purely defensive.
+		// NOTE: this is purely defensive.
 		T::Currency::make_free_balance_be(&reward_account, Zero::zero());
 		T::Currency::make_free_balance_be(&bonded_pool.bonded_account(), Zero::zero());

@@ -2199,75 +2312,6 @@ impl<T: Config> Pallet<T> {
 			.div(current_points)
 	}

-	/// Calculate the rewards for `member`.
-	///
-	/// Returns the payout amount.
-	fn calculate_member_payout(
-		member: &mut PoolMember<T>,
-		bonded_pool: &mut BondedPool<T>,
-		reward_pool: &mut RewardPool<T>,
-	) -> Result<BalanceOf<T>, DispatchError> {
-		let u256 = |x| T::BalanceToU256::convert(x);
-		let balance = |x| T::U256ToBalance::convert(x);
-
-		let last_total_earnings = reward_pool.total_earnings;
-		reward_pool.update_total_earnings_and_balance(bonded_pool.id);
-
-		// Notice there is an edge case where total_earnings have not increased and this is zero
-		let new_earnings = u256(reward_pool.total_earnings.saturating_sub(last_total_earnings));
-
-		// The new points that will be added to the pool. For every unit of balance that has been
-		// earned by the reward pool, we inflate the reward pool points by `bonded_pool.points`. In
-		// effect this allows each, single unit of balance (e.g. plank) to be divvied up pro rata
-		// among members based on points.
-		let new_points = u256(bonded_pool.points).saturating_mul(new_earnings);
-
-		// The points of the reward pool after taking into account the new earnings. Notice that
-		// this only stays even or increases over time except for when we subtract member virtual
-		// shares.
-		let current_points = bonded_pool.bound_check(reward_pool.points.saturating_add(new_points));
-
-		// The rewards pool's earnings since the last time this member claimed a payout.
-		let new_earnings_since_last_claim =
-			reward_pool.total_earnings.saturating_sub(member.reward_pool_total_earnings);
-
-		// The points of the reward pool that belong to the member.
-		let member_virtual_points =
-			// The members portion of the reward pool
-			u256(member.active_points())
-				// times the amount the pool has earned since the member last claimed.
-				.saturating_mul(u256(new_earnings_since_last_claim));
-
-		let member_payout = if member_virtual_points.is_zero() ||
-			current_points.is_zero() ||
-			reward_pool.balance.is_zero()
-		{
-			Zero::zero()
-		} else {
-			// Equivalent to `(member_virtual_points / current_points) * reward_pool.balance`
-			let numerator = {
-				let numerator = member_virtual_points.saturating_mul(u256(reward_pool.balance));
-				bonded_pool.bound_check(numerator)
-			};
-			balance(
-				numerator
-					// We check for zero above
-					.div(current_points),
-			)
-		};
-
-		// Record updates
-		if reward_pool.total_earnings == BalanceOf::<T>::max_value() {
-			bonded_pool.set_state(PoolState::Destroying);
-		};
-
-		member.reward_pool_total_earnings = reward_pool.total_earnings;
-		reward_pool.points = current_points.saturating_sub(member_virtual_points);
-		reward_pool.balance = reward_pool.balance.saturating_sub(member_payout);
-
-		Ok(member_payout)
-	}
-
 	/// If the member has some rewards, transfer a payout from the reward pool to the member.
 	// Emits events and potentially modifies pool state if any arithmetic saturates, but does
 	// not persist any of the mutable inputs to storage.
@@ -2278,38 +2322,37 @@ impl<T: Config> Pallet<T> {
 		reward_pool: &mut RewardPool<T>,
 	) -> Result<BalanceOf<T>, DispatchError> {
 		debug_assert_eq!(member.pool_id, bonded_pool.id);
+
 		// a member who has no skin in the game anymore cannot claim any rewards.
 		ensure!(!member.active_points().is_zero(), Error::<T>::FullyUnbonding);
-		let was_destroying = bonded_pool.is_destroying();

-		let member_payout = Self::calculate_member_payout(member, bonded_pool, reward_pool)?;
+		let current_reward_counter =
+			reward_pool.current_reward_counter(bonded_pool.id, bonded_pool.points)?;
+		let pending_rewards = member.pending_rewards(current_reward_counter)?;

-		if member_payout.is_zero() {
-			return Ok(member_payout)
+		if pending_rewards.is_zero() {
+			return Ok(pending_rewards)
 		}

+		// IFF the reward is non-zero, alter the member and reward pool info.
+		member.last_recorded_reward_counter = current_reward_counter;
+		reward_pool.register_claimed_reward(pending_rewards);
+
 		// Transfer payout to the member.
 		T::Currency::transfer(
 			&bonded_pool.reward_account(),
 			&member_account,
-			member_payout,
+			pending_rewards,
 			ExistenceRequirement::AllowDeath,
 		)?;

 		Self::deposit_event(Event::<T>::PaidOut {
 			member: member_account.clone(),
 			pool_id: member.pool_id,
-			payout: member_payout,
+			payout: pending_rewards,
 		});

-		if bonded_pool.is_destroying() && !was_destroying {
-			Self::deposit_event(Event::<T>::StateChanged {
-				pool_id: member.pool_id,
-				new_state: PoolState::Destroying,
-			});
-		}
-
-		Ok(member_payout)
+		Ok(pending_rewards)
 	}

 	/// Ensure the correctness of the state of this pallet.
@@ -2432,7 +2475,8 @@ impl<T: Config> Pallet<T> {
 		member: T::AccountId,
 	) -> DispatchResult {
 		let points = PoolMembers::<T>::get(&member).map(|d| d.active_points()).unwrap_or_default();
-		Self::unbond(origin, member, points)
+		let member_lookup = T::Lookup::unlookup(member);
+		Self::unbond(origin, member_lookup, points)
 	}
 }
diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs
index e23a35fe85602..412c954a2bbf3 100644
--- a/frame/nomination-pools/src/migration.rs
+++ b/frame/nomination-pools/src/migration.rs
@@ -16,11 +16,12 @@
 // limitations under the License.

 use super::*;
+use crate::log;
+use frame_support::traits::OnRuntimeUpgrade;
+use sp_std::collections::btree_map::BTreeMap;

 pub mod v1 {
 	use super::*;
-	use crate::log;
-	use frame_support::traits::OnRuntimeUpgrade;

 	#[derive(Decode)]
 	pub struct OldPoolRoles<AccountId> {
@@ -103,3 +104,283 @@ pub mod v1 {
 		}
 	}
 }
+
+pub mod v2 {
+	use super::*;
+	use sp_runtime::Perbill;
+
+	#[test]
+	fn migration_assumption_is_correct() {
+		// this migration cleans all the reward accounts to contain exactly ed, and all members
+		// having no claimable rewards. In this state, the fields of the `RewardPool` and
+		// `member.last_recorded_reward_counter` are all zero.
+		use crate::mock::*;
+		ExtBuilder::default().build_and_execute(|| {
+			let join = |x| {
+				Balances::make_free_balance_be(&x, Balances::minimum_balance() + 10);
+				frame_support::assert_ok!(Pools::join(Origin::signed(x), 10, 1));
+			};
+
+			assert_eq!(BondedPool::<Runtime>::get(1).unwrap().points, 10);
+			assert_eq!(
+				RewardPools::<Runtime>::get(1).unwrap(),
+				RewardPool { ..Default::default() }
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(10).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+
+			join(20);
+			assert_eq!(BondedPool::<Runtime>::get(1).unwrap().points, 20);
+			assert_eq!(
+				RewardPools::<Runtime>::get(1).unwrap(),
+				RewardPool { ..Default::default() }
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(10).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(20).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+
+			join(30);
+			assert_eq!(BondedPool::<Runtime>::get(1).unwrap().points, 30);
+			assert_eq!(
+				RewardPools::<Runtime>::get(1).unwrap(),
+				RewardPool { ..Default::default() }
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(10).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(20).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+			assert_eq!(
+				PoolMembers::<Runtime>::get(30).unwrap().last_recorded_reward_counter,
+				Zero::zero()
+			);
+		});
+	}
+
+	#[derive(Decode)]
+	pub struct OldRewardPool<B> {
+		pub balance: B,
+		pub total_earnings: B,
+		pub points: U256,
+	}
+
+	#[derive(Decode)]
+	pub struct OldPoolMember<T: Config> {
+		pub pool_id: PoolId,
+		pub points: BalanceOf<T>,
+		pub reward_pool_total_earnings: BalanceOf<T>,
+		pub unbonding_eras: BoundedBTreeMap<EraIndex, BalanceOf<T>, T::MaxUnbonding>,
+	}
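Aside: a standalone sketch of the pro-rata "last claim" that `MigrateToV2` below pays each member, with `Perbill::from_rational` approximated by plain integer math. All figures are hypothetical:

```rust
fn main() {
    let accumulated_reward: u128 = 100;   // reward account balance above the ED
    let pool_points: u128 = 10 + 40 + 50; // total points of the bonded pool
    let members = [(10u128, 10u128), (40, 40), (50, 50)]; // (who, points)

    let mut sum_paid_out = 0u128;
    for (who, points) in members {
        // portion = points / pool_points; last_claim = portion * accumulated_reward
        let last_claim = accumulated_reward * points / pool_points;
        sum_paid_out += last_claim;
        println!("member {who} receives {last_claim}");
    }
    // any rounding leftover goes to the depositor, as in the migration code below
    assert_eq!(accumulated_reward - sum_paid_out, 0);
}
```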
+	/// Migrate the pool reward scheme to the new version, as per
+	/// .
+	pub struct MigrateToV2<T>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> MigrateToV2<T> {
+		fn run(current: StorageVersion) -> Weight {
+			let mut reward_pools_translated = 0u64;
+			let mut members_translated = 0u64;
+			// just for logging.
+			let mut total_value_locked = BalanceOf::<T>::zero();
+			let mut total_points_locked = BalanceOf::<T>::zero();
+
+			// store each member of the pool, with their active points. In the process, migrate
+			// their data as well.
+			let mut temp_members = BTreeMap::<PoolId, Vec<(T::AccountId, BalanceOf<T>)>>::new();
+			PoolMembers::<T>::translate::<OldPoolMember<T>, _>(|key, old_member| {
+				let id = old_member.pool_id;
+				temp_members.entry(id).or_default().push((key, old_member.points));
+
+				total_points_locked += old_member.points;
+				members_translated += 1;
+				Some(PoolMember::<T> {
+					last_recorded_reward_counter: Zero::zero(),
+					pool_id: old_member.pool_id,
+					points: old_member.points,
+					unbonding_eras: old_member.unbonding_eras,
+				})
+			});
+
+			// translate all reward pools. In the process, do the last payout as well.
+			RewardPools::<T>::translate::<OldRewardPool<BalanceOf<T>>, _>(
+				|id, _old_reward_pool| {
+					// each pool should have at least one member.
+					let members = match temp_members.get(&id) {
+						Some(x) => x,
+						None => {
+							log!(error, "pool {} has no member! deleting it..", id);
+							return None
+						},
+					};
+					let bonded_pool = match BondedPools::<T>::get(id) {
+						Some(x) => x,
+						None => {
+							log!(error, "pool {} has no bonded pool! deleting it..", id);
+							return None
+						},
+					};
+
+					let accumulated_reward = RewardPool::<T>::current_balance(id);
+					let reward_account = Pallet::<T>::create_reward_account(id);
+					let mut sum_paid_out = BalanceOf::<T>::zero();
+
+					members
+						.into_iter()
+						.filter_map(|(who, points)| {
+							let bonded_pool = match BondedPool::<T>::get(id) {
+								Some(x) => x,
+								None => {
+									log!(error, "pool {} for member {:?} does not exist!", id, who);
+									return None
+								},
+							};
+
+							total_value_locked += bonded_pool.points_to_balance(*points);
+							let portion = Perbill::from_rational(*points, bonded_pool.points);
+							let last_claim = portion * accumulated_reward;
+
+							log!(
+								debug,
+								"{:?} has {:?} ({:?}) of pool {} with total reward of {:?}",
+								who,
+								portion,
+								last_claim,
+								id,
+								accumulated_reward
+							);
+
+							if last_claim.is_zero() {
+								None
+							} else {
+								Some((who, last_claim))
+							}
+						})
+						.for_each(|(who, last_claim)| {
+							let outcome = T::Currency::transfer(
+								&reward_account,
+								&who,
+								last_claim,
+								ExistenceRequirement::KeepAlive,
+							);
+
+							if let Err(reason) = outcome {
+								log!(warn, "last reward claim failed due to {:?}", reason,);
+							} else {
+								sum_paid_out = sum_paid_out.saturating_add(last_claim);
+							}
+
+							Pallet::<T>::deposit_event(Event::<T>::PaidOut {
+								member: who.clone(),
+								pool_id: id,
+								payout: last_claim,
+							});
+						});
+
+					// this can only be because of rounding down, or because the person we
+					// wanted to pay their reward to could not accept it (dust).
+					let leftover = accumulated_reward.saturating_sub(sum_paid_out);
+					if !leftover.is_zero() {
+						// pay it all to depositor.
+						let o = T::Currency::transfer(
+							&reward_account,
+							&bonded_pool.roles.depositor,
+							leftover,
+							ExistenceRequirement::KeepAlive,
+						);
+						log!(warn, "paying {:?} leftover to the depositor: {:?}", leftover, o);
+					}
+
+					// finally, migrate the reward pool.
+					reward_pools_translated += 1;
+
+					Some(RewardPool {
+						last_recorded_reward_counter: Zero::zero(),
+						last_recorded_total_payouts: Zero::zero(),
+						total_rewards_claimed: Zero::zero(),
+					})
+				},
+			);
+
+			log!(
+				info,
+				"Upgraded {} members, {} reward pools, TVL {:?} TPL {:?}, storage to version {:?}",
+				members_translated,
+				reward_pools_translated,
+				total_value_locked,
+				total_points_locked,
+				current
+			);
+			current.put::<Pallet<T>>();
+
+			T::DbWeight::get().reads_writes(members_translated + 1, reward_pools_translated + 1)
+		}
+	}
+
+	impl<T: Config> OnRuntimeUpgrade for MigrateToV2<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let current = Pallet::<T>::current_storage_version();
+			let onchain = Pallet::<T>::on_chain_storage_version();
+
+			log!(
+				info,
+				"Running migration with current storage version {:?} / onchain {:?}",
+				current,
+				onchain
+			);
+
+			if current == 2 && onchain == 1 {
+				Self::run(current)
+			} else {
+				log!(info, "MigrateToV2 did not execute. This probably should be removed");
+				T::DbWeight::get().reads(1)
+			}
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<(), &'static str> {
+			// all reward accounts must have more than ED.
+			RewardPools::<T>::iter().for_each(|(id, _)| {
+				assert!(
+					T::Currency::free_balance(&Pallet::<T>::create_reward_account(id)) >=
+						T::Currency::minimum_balance()
+				)
+			});
+
+			Ok(())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade() -> Result<(), &'static str> {
+			// new version must be set.
+			assert_eq!(Pallet::<T>::on_chain_storage_version(), 2);
+
+			// no reward or bonded pool has been skipped.
+			assert_eq!(RewardPools::<T>::iter().count() as u32, RewardPools::<T>::count());
+			assert_eq!(BondedPools::<T>::iter().count() as u32, BondedPools::<T>::count());
+
+			// all reward pools must have exactly ED in them. This means no reward can be claimed,
+			// and that setting reward counters all over the board to zero will work henceforth.
+			RewardPools::<T>::iter().for_each(|(id, _)| {
+				assert_eq!(
+					RewardPool::<T>::current_balance(id),
+					Zero::zero(),
+					"reward pool({}) balance is {:?}",
+					id,
+					RewardPool::<T>::current_balance(id)
+				);
+			});
+
+			log!(info, "post upgrade hook for MigrateToV2 executed.");
+			Ok(())
+		}
+	}
+}
diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs
index a3020e5add044..5138c55afccac 100644
--- a/frame/nomination-pools/src/mock.rs
+++ b/frame/nomination-pools/src/mock.rs
@@ -2,9 +2,14 @@ use super::*;
 use crate::{self as pools};
 use frame_support::{assert_ok, parameter_types, PalletId};
 use frame_system::RawOrigin;
+use sp_runtime::FixedU128;

 pub type AccountId = u128;
 pub type Balance = u128;
+pub type RewardCounter = FixedU128;
+// This sneaky little hack allows us to write code exactly as we would do in the pallet in the tests
+// as well, e.g. `StorageItem::<T>::get()`.
+pub type T = Runtime;

 // Ext builder creates a pool with id 1.
 pub fn default_bonded_account() -> AccountId {
@@ -17,12 +22,14 @@ pub fn default_reward_account() -> AccountId {
 }

 parameter_types!
{ + pub static MinJoinBondConfig: Balance = 2; pub static CurrentEra: EraIndex = 0; pub static BondingDuration: EraIndex = 3; pub storage BondedBalanceMap: BTreeMap = Default::default(); pub storage UnbondingBalanceMap: BTreeMap = Default::default(); #[derive(Clone, PartialEq)] pub static MaxUnbonding: u32 = 8; + pub static StakingMinBond: Balance = 10; pub storage Nominations: Option> = None; } @@ -40,7 +47,7 @@ impl sp_staking::StakingInterface for StakingMock { type AccountId = AccountId; fn minimum_bond() -> Self::Balance { - 10 + StakingMinBond::get() } fn current_era() -> EraIndex { @@ -111,6 +118,7 @@ impl sp_staking::StakingInterface for StakingMock { Ok(()) } + #[cfg(feature = "runtime-benchmarks")] fn nominations(_: Self::AccountId) -> Option> { Nominations::get() } @@ -183,6 +191,8 @@ impl pools::Config for Runtime { type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = StakingMock; @@ -190,7 +200,7 @@ impl pools::Config for Runtime { type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; type MaxUnbonding = MaxUnbonding; - type MinPointsToBalance = frame_support::traits::ConstU32<10>; + type MaxPointsToBalance = frame_support::traits::ConstU8<10>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -207,9 +217,16 @@ frame_support::construct_runtime!( } ); -#[derive(Default)] pub struct ExtBuilder { members: Vec<(AccountId, Balance)>, + max_members: Option, + max_members_per_pool: Option, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { members: Default::default(), max_members: Some(4), max_members_per_pool: Some(3) } + } } impl ExtBuilder { @@ -224,21 +241,42 @@ impl ExtBuilder { self } + pub(crate) fn min_bond(self, min: Balance) -> Self { + StakingMinBond::set(min); + self + } + + pub(crate) fn min_join_bond(self, min: Balance) -> Self { + MinJoinBondConfig::set(min); + self + } + pub(crate) fn with_check(self, level: u8) -> Self { CheckLevel::set(level); self } + pub(crate) fn max_members(mut self, max: Option) -> Self { + self.max_members = max; + self + } + + pub(crate) fn max_members_per_pool(mut self, max: Option) -> Self { + self.max_members_per_pool = max; + self + } + pub(crate) fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); let _ = crate::GenesisConfig:: { - min_join_bond: 2, + min_join_bond: MinJoinBondConfig::get(), min_create_bond: 2, max_pools: Some(2), - max_members_per_pool: Some(3), - max_members: Some(4), + max_members_per_pool: self.max_members_per_pool, + max_members: self.max_members, } .assimilate_storage(&mut storage); @@ -249,8 +287,8 @@ impl ExtBuilder { frame_system::Pallet::::set_block_number(1); // make a pool - let amount_to_bond = ::StakingInterface::minimum_bond(); - Balances::make_free_balance_be(&10, amount_to_bond * 2); + let amount_to_bond = Pools::depositor_min_bond(); + Balances::make_free_balance_be(&10, amount_to_bond * 5); assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); let last_pool = LastPoolId::::get(); @@ -271,17 +309,18 @@ impl ExtBuilder { } } -pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) -> Result<(), ()> { +pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) { BondedPools::::try_mutate(pool_id, 
|maybe_bonded_pool| { maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { bonded_pool.state = state; }) }) + .unwrap() } parameter_types! { - static PoolsEvents: usize = 0; - static BalancesEvents: usize = 0; + storage PoolsEvents: u32 = 0; + storage BalancesEvents: u32 = 0; } /// All events of this pallet. @@ -292,8 +331,8 @@ pub(crate) fn pool_events_since_last_call() -> Vec> { .filter_map(|e| if let Event::Pools(inner) = e { Some(inner) } else { None }) .collect::>(); let already_seen = PoolsEvents::get(); - PoolsEvents::set(events.len()); - events.into_iter().skip(already_seen).collect() + PoolsEvents::set(&(events.len() as u32)); + events.into_iter().skip(already_seen as usize).collect() } /// All events of the `Balances` pallet. @@ -304,8 +343,8 @@ pub(crate) fn balances_events_since_last_call() -> Vec>(); let already_seen = BalancesEvents::get(); - BalancesEvents::set(events.len()); - events.into_iter().skip(already_seen).collect() + BalancesEvents::set(&(events.len() as u32)); + events.into_iter().skip(already_seen as usize).collect() } /// Same as `fully_unbond`, in permissioned setting. diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 97104423c5910..705f8ce3a6449 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -17,10 +17,7 @@ use super::*; use crate::{mock::*, Event}; -use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_btree_map, - storage::{with_transaction, TransactionOutcome}, -}; +use frame_support::{assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_btree_map}; use pallet_balances::Event as BEvent; use sp_runtime::traits::Dispatchable; @@ -66,7 +63,11 @@ fn test_setup_works() { ); assert_eq!( RewardPools::::get(last_pool).unwrap(), - RewardPool:: { balance: 0, points: 0u32.into(), total_earnings: 0 } + RewardPool:: { + last_recorded_reward_counter: Zero::zero(), + last_recorded_total_payouts: 0, + total_rewards_claimed: 0 + } ); assert_eq!( PoolMembers::::get(10).unwrap(), @@ -206,34 +207,34 @@ mod bonded_pool { }, }; - let min_points_to_balance: u128 = - <::MinPointsToBalance as Get>::get().into(); + let max_points_to_balance: u128 = + <::MaxPointsToBalance as Get>::get().into(); // Simulate a 100% slashed pool StakingMock::set_bonded_balance(pool.bonded_account(), 0); assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); - // Simulate a slashed pool at `MinPointsToBalance` + 1 slashed pool + // Simulate a slashed pool at `MaxPointsToBalance` + 1 slashed pool StakingMock::set_bonded_balance( pool.bonded_account(), - min_points_to_balance.saturating_add(1).into(), + max_points_to_balance.saturating_add(1).into(), ); assert_ok!(pool.ok_to_join(0)); - // Simulate a slashed pool at `MinPointsToBalance` - StakingMock::set_bonded_balance(pool.bonded_account(), min_points_to_balance); + // Simulate a slashed pool at `MaxPointsToBalance` + StakingMock::set_bonded_balance(pool.bonded_account(), max_points_to_balance); assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); StakingMock::set_bonded_balance( pool.bonded_account(), - Balance::MAX / min_points_to_balance, + Balance::MAX / max_points_to_balance, ); // New bonded balance would be over threshold of Balance type assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); // and a sanity check StakingMock::set_bonded_balance( pool.bonded_account(), - Balance::MAX / min_points_to_balance - 1, + Balance::MAX / max_points_to_balance - 1, ); assert_ok!(pool.ok_to_join(0)); }); @@ 
-515,44 +516,37 @@ mod join { .put(); // and reward pool - RewardPools::::insert( - 123, - RewardPool:: { - balance: Zero::zero(), - total_earnings: Zero::zero(), - points: U256::from(0u32), - }, - ); + RewardPools::::insert(123, RewardPool:: { ..Default::default() }); - // Force the points:balance ratio to `MinPointsToBalance` (100/10) - let min_points_to_balance: u128 = - <::MinPointsToBalance as Get>::get().into(); + // Force the points:balance ratio to `MaxPointsToBalance` (100/10) + let max_points_to_balance: u128 = + <::MaxPointsToBalance as Get>::get().into(); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), - min_points_to_balance, + max_points_to_balance, ); assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::OverflowRisk); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), - Balance::MAX / min_points_to_balance, + Balance::MAX / max_points_to_balance, ); - // Balance needs to be gt Balance::MAX / `MinPointsToBalance` + // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` assert_noop!(Pools::join(Origin::signed(11), 5, 123), Error::::OverflowRisk); - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), min_points_to_balance); + StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); // Cannot join a pool that isn't open - unsafe_set_state(123, PoolState::Blocked).unwrap(); + unsafe_set_state(123, PoolState::Blocked); assert_noop!( - Pools::join(Origin::signed(11), min_points_to_balance, 123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); - unsafe_set_state(123, PoolState::Destroying).unwrap(); + unsafe_set_state(123, PoolState::Destroying); assert_noop!( - Pools::join(Origin::signed(11), min_points_to_balance, 123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); @@ -649,17 +643,34 @@ mod join { mod claim_payout { use super::*; - fn del(points: Balance, reward_pool_total_earnings: Balance) -> PoolMember { + fn del(points: Balance, last_recorded_reward_counter: u128) -> PoolMember { + PoolMember { + pool_id: 1, + points, + last_recorded_reward_counter: last_recorded_reward_counter.into(), + unbonding_eras: Default::default(), + } + } + + fn del_float(points: Balance, last_recorded_reward_counter: f64) -> PoolMember { PoolMember { pool_id: 1, points, - reward_pool_total_earnings, + last_recorded_reward_counter: RewardCounter::from_float(last_recorded_reward_counter), unbonding_eras: Default::default(), } } - fn rew(balance: Balance, points: u32, total_earnings: Balance) -> RewardPool { - RewardPool { balance, points: points.into(), total_earnings } + fn rew( + last_recorded_reward_counter: u128, + last_recorded_total_payouts: Balance, + total_rewards_claimed: Balance, + ) -> RewardPool { + RewardPool { + last_recorded_reward_counter: last_recorded_reward_counter.into(), + last_recorded_total_payouts, + total_rewards_claimed, + } } #[test] @@ -672,8 +683,12 @@ mod claim_payout { Balances::make_free_balance_be(&40, 0); Balances::make_free_balance_be(&50, 0); let ed = Balances::minimum_balance(); + // and the reward pool has earned 100 in rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 100); + assert_eq!(Balances::free_balance(default_reward_account()), ed); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + + let _ = pool_events_since_last_call(); // When assert_ok!(Pools::claim_payout(Origin::signed(10))); @@ -681,22 +696,13 @@ mod claim_payout { // 
Then assert_eq!( pool_events_since_last_call(), - vec![ - Event::Created { depositor: 10, pool_id: 1 }, - Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, - Event::Bonded { member: 50, pool_id: 1, bonded: 50, joined: true }, - Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, - ] - ); - - // Expect a payout of 10: (10 del virtual points / 100 pool points) * 100 pool - // balance - assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 100)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew(90, 100 * 100 - 100 * 10, 100) + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 },] ); + // last recorded reward counter at the time of this member's payout is 1 + assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 1)); + // pool's 'last_recorded_reward_counter' and 'last_recorded_total_payouts' don't + // really change unless if someone bonds/unbonds. + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 10)); assert_eq!(Balances::free_balance(&10), 10); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90); @@ -708,13 +714,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }] ); - - // Expect payout 40: (400 del virtual points / 900 pool points) * 90 pool balance - assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 100)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew(50, 9_000 - 100 * 40, 100) - ); + assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 1)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 50)); assert_eq!(Balances::free_balance(&40), 40); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50); @@ -726,15 +727,13 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }] ); - - // Expect payout 50: (50 del virtual points / 50 pool points) * 50 pool balance - assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 100)); + assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 1)); assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 100)); assert_eq!(Balances::free_balance(&50), 50); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0); // Given the reward pool has some new rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 50); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); // When assert_ok!(Pools::claim_payout(Origin::signed(10))); @@ -744,10 +743,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }] ); - - // Expect payout 5: (500 del virtual points / 5,000 pool points) * 50 pool balance - assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 150)); - assert_eq!(RewardPools::::get(&1).unwrap(), rew(45, 5_000 - 50 * 10, 150)); + assert_eq!(PoolMembers::::get(10).unwrap(), del_float(10, 1.5)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 105)); assert_eq!(Balances::free_balance(&10), 10 + 5); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45); @@ -759,11 +756,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }] ); - - // Expect payout 20: (2,000 del virtual points / 4,500 pool points) * 45 pool - // balance - assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 150)); - assert_eq!(RewardPools::::get(&1).unwrap(), rew(25, 4_500 - 50 * 40, 150)); + assert_eq!(PoolMembers::::get(40).unwrap(), 
 			assert_eq!(Balances::free_balance(&10), 10);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90);
 
@@ -708,13 +714,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }]
 			);
-
-			// Expect payout 40: (400 del virtual points / 900 pool points) * 90 pool balance
-			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del(40, 100));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(50, 9_000 - 100 * 40, 100)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del(40, 1));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 50));
 			assert_eq!(Balances::free_balance(&40), 40);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50);
 
@@ -726,15 +727,13 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
 			);
-
-			// Expect payout 50: (50 del virtual points / 50 pool points) * 50 pool balance
-			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del(50, 100));
+			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del(50, 1));
 			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 100));
 			assert_eq!(Balances::free_balance(&50), 50);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0);
 
 			// Given the reward pool has some new rewards
-			Balances::make_free_balance_be(&default_reward_account(), ed + 50);
+			assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50));
 
 			// When
 			assert_ok!(Pools::claim_payout(Origin::signed(10)));
 
@@ -744,10 +743,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
 			);
-
-			// Expect payout 5: (500 del virtual points / 5,000 pool points) * 50 pool balance
-			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 150));
-			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(45, 5_000 - 50 * 10, 150));
+			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del_float(10, 1.5));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 105));
 			assert_eq!(Balances::free_balance(&10), 10 + 5);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45);
 
@@ -759,11 +756,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }]
 			);
-
-			// Expect payout 20: (2,000 del virtual points / 4,500 pool points) * 45 pool
-			// balance
-			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del(40, 150));
-			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(25, 4_500 - 50 * 40, 150));
+			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del_float(40, 1.5));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 125));
 			assert_eq!(Balances::free_balance(&40), 40 + 20);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25);
 
@@ -779,22 +773,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
 			);
-
-			// We expect a payout of 50: (5,000 del virtual points / 7,5000 pool points) * 75
-			// pool balance
-			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del(50, 200));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(
-					25,
-					// old pool points + points from new earnings - del points.
-					//
-					// points from new earnings = new earnings(50) * bonded_pool.points(100)
-					// del points = member.points(50) * new_earnings_since_last_claim (100)
-					(2_500 + 50 * 100) - 50 * 100,
-					200,
-				)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del_float(50, 2.0));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 175));
 			assert_eq!(Balances::free_balance(&50), 50 + 50);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25);
 
@@ -806,10 +786,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
 			);
-
-			// We expect a payout of 5
-			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 200));
-			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(20, 2_500 - 10 * 50, 200));
+			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 2));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 180));
 			assert_eq!(Balances::free_balance(&10), 15 + 5);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 20);
 
@@ -827,19 +805,8 @@ mod claim_payout {
 			);
 
 			// We expect a payout of 40
-			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 600));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(
-					380,
-					// old pool points + points from new earnings - del points
-					//
-					// points from new earnings = new earnings(400) * bonded_pool.points(100)
-					// del points = member.points(10) * new_earnings_since_last_claim(400)
-					(2_000 + 400 * 100) - 10 * 400,
-					600
-				)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 6));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 220));
 			assert_eq!(Balances::free_balance(&10), 20 + 40);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 380);
 
@@ -855,14 +822,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 10, pool_id: 1, payout: 2 }]
 			);
-
-			// Expect a payout of 2: (200 del virtual points / 38,000 pool points) * 400 pool
-			// balance
-			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del(10, 620));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(398, (38_000 + 20 * 100) - 10 * 20, 620)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap(), del_float(10, 6.2));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 222));
 			assert_eq!(Balances::free_balance(&10), 60 + 2);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398);
 
@@ -874,14 +835,8 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 40, pool_id: 1, payout: 188 }]
 			);
-
-			// Expect a payout of 188: (18,800 del virtual points / 39,800 pool points) * 399
-			// pool balance
-			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del(40, 620));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(210, 39_800 - 40 * 470, 620)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(40).unwrap(), del_float(40, 6.2));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 410));
 			assert_eq!(Balances::free_balance(&40), 60 + 188);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210);
 
@@ -893,88 +848,20 @@ mod claim_payout {
 				pool_events_since_last_call(),
 				vec![Event::PaidOut { member: 50, pool_id: 1, payout: 210 }]
 			);
-
-			// Expect payout of 210: (21,000 / 21,000) * 210
-			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del(50, 620));
-			assert_eq!(
-				RewardPools::<Runtime>::get(&1).unwrap(),
-				rew(0, 21_000 - 50 * 420, 620)
-			);
+			assert_eq!(PoolMembers::<Runtime>::get(50).unwrap(), del_float(50, 6.2));
+			assert_eq!(RewardPools::<Runtime>::get(&1).unwrap(), rew(0, 0, 620));
 			assert_eq!(Balances::free_balance(&50), 100 + 210);
 			assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0);
 		});
 	}
 
-	#[test]
-	fn do_reward_payout_correctly_sets_pool_state_to_destroying() {
-		ExtBuilder::default().build_and_execute(|| {
-			let _ = with_transaction(|| -> TransactionOutcome<DispatchResult> {
-				let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
-				let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-				let mut member = PoolMembers::<Runtime>::get(10).unwrap();
-
-				// -- reward_pool.total_earnings saturates
-
-				// Given
-				Balances::make_free_balance_be(&default_reward_account(), Balance::MAX);
-
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut member,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
-
-				// Then
-				assert!(bonded_pool.is_destroying());
-
-				storage::TransactionOutcome::Rollback(Ok(()))
-			});
-
-			// -- current_points saturates (reward_pool.points + new_earnings * bonded_pool.points)
-			let _ = with_transaction(|| -> TransactionOutcome<DispatchResult> {
-				// Given
-				let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
-				let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-				let mut member = PoolMembers::<Runtime>::get(10).unwrap();
-				// Force new_earnings * bonded_pool.points == 100
-				Balances::make_free_balance_be(&default_reward_account(), 5 + 10);
-				assert_eq!(bonded_pool.points, 10);
-				// Force reward_pool.points == U256::MAX - new_earnings * bonded_pool.points
-				reward_pool.points = U256::MAX - U256::from(100u32);
-				RewardPools::<Runtime>::insert(1, reward_pool.clone());
-
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut member,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
-
-				// Then
-				assert!(bonded_pool.is_destroying());
-
-				storage::TransactionOutcome::Rollback(Ok(()))
-			});
-		});
-	}
-
 	#[test]
 	fn reward_payout_errors_if_a_member_is_fully_unbonding() {
 		ExtBuilder::default().add_members(vec![(11, 11)]).build_and_execute(|| {
 			// fully unbond the member.
-			assert_ok!(Pools::fully_unbond(Origin::signed(11), 11));
-
-			let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
-			let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-			let mut member = PoolMembers::<Runtime>::get(11).unwrap();
+			assert_ok!(fully_unbond_permissioned(11));
 
-			assert_noop!(
-				Pools::do_reward_payout(&11, &mut member, &mut bonded_pool, &mut reward_pool,),
-				Error::<Runtime>::FullyUnbonding
-			);
+			assert_noop!(Pools::claim_payout(Origin::signed(11)), Error::<Runtime>::FullyUnbonding);
 
 			assert_eq!(
 				pool_events_since_last_call(),
@@ -982,87 +869,94 @@ mod claim_payout {
 					Event::Created { depositor: 10, pool_id: 1 },
 					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
 					Event::Bonded { member: 11, pool_id: 1, bonded: 11, joined: true },
-					Event::Unbonded { member: 11, pool_id: 1, points: 11, balance: 11 }
+					Event::Unbonded { member: 11, pool_id: 1, points: 11, balance: 11, era: 3 }
 				]
 			);
 		});
 	}
 
 	#[test]
-	fn calculate_member_payout_works_with_a_pool_of_1() {
-		let del = |reward_pool_total_earnings| del(10, reward_pool_total_earnings);
+	fn do_reward_payout_works_with_a_pool_of_1() {
+		let del = |last_recorded_reward_counter| del_float(10, last_recorded_reward_counter);
 
 		ExtBuilder::default().build_and_execute(|| {
-			let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
-			let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-			let mut member = PoolMembers::<Runtime>::get(10).unwrap();
+			let (mut member, mut bonded_pool, mut reward_pool) =
+				Pools::get_member_with_pools(&10).unwrap();
 			let ed = Balances::minimum_balance();
 
-			// Given no rewards have been earned
 			// When
 			let payout =
-				Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool)
+				Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool)
 					.unwrap();
 
 			// Then
 			assert_eq!(payout, 0);
-			assert_eq!(member, del(0));
+			assert_eq!(member, del(0.0));
 			assert_eq!(reward_pool, rew(0, 0, 0));
 
 			// Given the pool has earned some rewards for the first time
-			Balances::make_free_balance_be(&default_reward_account(), ed + 5);
+			assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 5));
 
 			// When
 			let payout =
-				Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool)
+				Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool)
					.unwrap();
 
 			// Then
-			assert_eq!(payout, 5); // (10 * 5 del virtual points / 10 * 5 pool points) * 5 pool balance
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 5 }
+				]
+			);
+			assert_eq!(payout, 5);
 			assert_eq!(reward_pool, rew(0, 0, 5));
-			assert_eq!(member, del(5));
+			assert_eq!(member, del(0.5));
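+			// illustrative arithmetic: 5 rewards / 10 points moves the counter by 0.5,
+			// and the payout is 10 points * 0.5 = 5.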
 
 			// Given the pool has earned rewards again
-			Balances::make_free_balance_be(&default_reward_account(), ed + 10);
+			assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10));
 
 			// When
 			let payout =
-				Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool)
+				Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool)
 					.unwrap();
 
 			// Then
-			assert_eq!(payout, 10); // (10 * 10 del virtual points / 10 pool points) * 5 pool balance
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }]
+			);
+			assert_eq!(payout, 10);
 			assert_eq!(reward_pool, rew(0, 0, 15));
-			assert_eq!(member, del(15));
+			assert_eq!(member, del(1.5));
 
 			// Given the pool has earned no new rewards
 			Balances::make_free_balance_be(&default_reward_account(), ed + 0);
 
 			// When
 			let payout =
-				Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool)
+				Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool)
 					.unwrap();
 
 			// Then
+			assert_eq!(pool_events_since_last_call(), vec![]);
 			assert_eq!(payout, 0);
 			assert_eq!(reward_pool, rew(0, 0, 15));
-			assert_eq!(member, del(15));
+			assert_eq!(member, del(1.5));
 		});
 	}
 
 	#[test]
-	fn calculate_member_payout_works_with_a_pool_of_3() {
+	fn do_reward_payout_works_with_a_pool_of_3() {
 		ExtBuilder::default()
 			.add_members(vec![(40, 40), (50, 50)])
 			.build_and_execute(|| {
 				let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
 				let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-				let ed = Balances::minimum_balance();
 
-				// PoolMember with 10 points
+
 				let mut del_10 = PoolMembers::<Runtime>::get(10).unwrap();
-				// PoolMember with 40 points
 				let mut del_40 = PoolMembers::<Runtime>::get(40).unwrap();
-				// PoolMember with 50 points
 				let mut del_50 = PoolMembers::<Runtime>::get(50).unwrap();
 
 				assert_eq!(
@@ -1078,489 +972,1293 @@ mod claim_payout {
 				// Given we have a total of 100 points split among the members
 				assert_eq!(del_50.points + del_40.points + del_10.points, 100);
 				assert_eq!(bonded_pool.points, 100);
+
 				// and the reward pool has earned 100 in rewards
-				Balances::make_free_balance_be(&default_reward_account(), ed + 100);
+				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 10); // (10 del virtual points / 100 pool points) * 100 pool balance
-				assert_eq!(del_10, del(10, 100));
-				assert_eq!(reward_pool, rew(90, 100 * 100 - 100 * 10, 100));
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 10));
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }]
+				);
+				assert_eq!(payout, 10);
+				assert_eq!(del_10, del(10, 1));
+				assert_eq!(reward_pool, rew(0, 0, 10));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 40); // (400 del virtual points / 900 pool points) * 90 pool balance
-				assert_eq!(del_40, del(40, 100));
 				assert_eq!(
-					reward_pool,
-					rew(
-						50,
-						// old pool points - member virtual points
-						9_000 - 100 * 40,
-						100
-					)
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }]
 				);
-				// Mock the reward pool transferring the payout to del_40
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 40));
+				assert_eq!(payout, 40);
+				assert_eq!(del_40, del(40, 1));
+				assert_eq!(reward_pool, rew(0, 0, 50));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 50); // (50 del virtual points / 50 pool points) * 50 pool balance
-				assert_eq!(del_50, del(50, 100));
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
+				);
+				assert_eq!(payout, 50);
+				assert_eq!(del_50, del(50, 1));
 				assert_eq!(reward_pool, rew(0, 0, 100));
-				// Mock the reward pool transferring the payout to del_50
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 50));
 
 				// Given the reward pool has some new rewards
-				Balances::make_free_balance_be(&default_reward_account(), ed + 50);
+				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 5); // (500 del virtual points / 5,000 pool points) * 50 pool balance
-				assert_eq!(del_10, del(10, 150));
-				assert_eq!(reward_pool, rew(45, 5_000 - 50 * 10, 150));
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 5));
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
+				);
+				assert_eq!(payout, 5);
+				assert_eq!(del_10, del_float(10, 1.5));
+				assert_eq!(reward_pool, rew(0, 0, 105));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 20); // (2,000 del virtual points / 4,500 pool points) * 45 pool balance
-				assert_eq!(del_40, del(40, 150));
-				assert_eq!(reward_pool, rew(25, 4_500 - 50 * 40, 150));
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 20));
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }]
+				);
+				assert_eq!(payout, 20);
+				assert_eq!(del_40, del_float(40, 1.5));
+				assert_eq!(reward_pool, rew(0, 0, 125));
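+				// illustrative arithmetic: 40's counter moved from 1.0 to 1.5, so its
+				// payout is 40 points * 0.5 = 20.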
 
 				// Given del_50 hasn't claimed and the reward pools has just earned 50
 				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75);
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 50); // (5,000 del virtual points / 7,5000 pool points) * 75 pool balance
-				assert_eq!(del_50, del(50, 200));
-				assert_eq!(
-					reward_pool,
-					rew(
-						25,
-						// old pool points + points from new earnings - del points.
-						//
-						// points from new earnings = new earnings(50) * bonded_pool.points(100)
-						// del points = member.points(50) * new_earnings_since_last_claim (100)
-						(2_500 + 50 * 100) - 50 * 100,
-						200,
-					)
-				);
-				// Mock the reward pool transferring the payout to del_50
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 50));
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
+				);
+				assert_eq!(payout, 50);
+				assert_eq!(del_50, del_float(50, 2.0));
+				assert_eq!(reward_pool, rew(0, 0, 175));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
+				);
 				assert_eq!(payout, 5);
-				assert_eq!(del_10, del(10, 200));
-				assert_eq!(reward_pool, rew(20, 2_500 - 10 * 50, 200));
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 5));
+				assert_eq!(del_10, del_float(10, 2.0));
+				assert_eq!(reward_pool, rew(0, 0, 180));
 
 				// Given del_40 hasn't claimed and the reward pool has just earned 400
 				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420);
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
+				assert_eq!(
+					pool_events_since_last_call(),
+					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 40 }]
+				);
 				assert_eq!(payout, 40);
-				assert_eq!(del_10, del(10, 600));
-				assert_eq!(
-					reward_pool,
-					rew(
-						380,
-						// old pool points + points from new earnings - del points
-						//
-						// points from new earnings = new earnings(400) * bonded_pool.points(100)
-						// del points = member.points(10) * new_earnings_since_last_claim(400)
-						(2_000 + 400 * 100) - 10 * 400,
-						600
-					)
-				);
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 40));
+				assert_eq!(del_10, del_float(10, 6.0));
+				assert_eq!(reward_pool, rew(0, 0, 220));
 
 				// Given del_40 + del_50 haven't claimed and the reward pool has earned 20
 				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400);
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 2); // (200 del virtual points / 38,000 pool points) * 400 pool balance
-				assert_eq!(del_10, del(10, 620));
-				assert_eq!(reward_pool, rew(398, (38_000 + 20 * 100) - 10 * 20, 620));
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 2));
+				assert_eq!(payout, 2);
+				assert_eq!(del_10, del_float(10, 6.2));
+				assert_eq!(reward_pool, rew(0, 0, 222));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 188); // (18,800 del virtual points / 39,800 pool points) * 399 pool balance
-				assert_eq!(del_40, del(40, 620));
-				assert_eq!(reward_pool, rew(210, 39_800 - 40 * 470, 620));
-				// Mock the reward pool transferring the payout to del_10
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 188));
+				assert_eq!(payout, 188); // 20 (from the 50) + 160 (from the 400) + 8 (from the 20)
+				assert_eq!(del_40, del_float(40, 6.2));
+				assert_eq!(reward_pool, rew(0, 0, 410));
 
 				// When
 				let payout =
-					Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool)
+					Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool)
 						.unwrap();
 
 				// Then
-				assert_eq!(payout, 210); // (21,000 / 21,000) * 210
-				assert_eq!(del_50, del(50, 620));
-				assert_eq!(reward_pool, rew(0, 21_000 - 50 * 420, 620));
+				assert_eq!(payout, 210); // 200 (from the 400) + 10 (from the 20)
+				assert_eq!(del_50, del_float(50, 6.2));
+				assert_eq!(reward_pool, rew(0, 0, 620));
 			});
 	}
 
 	#[test]
-	fn do_reward_payout_works() {
-		ExtBuilder::default()
-			.add_members(vec![(40, 40), (50, 50)])
-			.build_and_execute(|| {
-				let mut bonded_pool = BondedPool::<Runtime>::get(1).unwrap();
-				let mut reward_pool = RewardPools::<Runtime>::get(1).unwrap();
-				let ed = Balances::minimum_balance();
+	fn rewards_distribution_is_fair_basic() {
+		ExtBuilder::default().build_and_execute(|| {
+			// reward pool by 10.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 10).unwrap();
 
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![
-						Event::Created { depositor: 10, pool_id: 1 },
-						Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
-						Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true },
-						Event::Bonded { member: 50, pool_id: 1, bonded: 50, joined: true }
-					]
-				);
+			// 20 joins afterwards.
+			Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10);
+			assert_ok!(Pools::join(Origin::signed(20), 10, 1));
 
-				// Given the bonded pool has 100 points
-				assert_eq!(bonded_pool.points, 100);
-				// Each member currently has a free balance of
-				Balances::make_free_balance_be(&10, 0);
-				Balances::make_free_balance_be(&40, 0);
-				Balances::make_free_balance_be(&50, 0);
-				// and the reward pool has earned 100 in rewards
-				Balances::make_free_balance_be(&default_reward_account(), ed + 100);
+			// reward by another 20
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap();
 
-				let mut del_10 = PoolMembers::<Runtime>::get(10).unwrap();
-				let mut del_40 = PoolMembers::<Runtime>::get(40).unwrap();
-				let mut del_50 = PoolMembers::<Runtime>::get(50).unwrap();
+			// 10 should claim 10 + 10, 20 should claim 20 / 2.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 20 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 10 },
+				]
+			);
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut del_10,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			// any upcoming rewards are shared equally.
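+			// (both members now hold 10 points, so the 20 that comes next splits 10/10.)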
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap();
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }]
-				);
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
 
-				// Expect a payout of 10: (10 del virtual points / 100 pool points) * 100 pool
-				// balance
-				assert_eq!(del_10, del(10, 100));
-				assert_eq!(reward_pool, rew(90, 100 * 100 - 100 * 10, 100));
-				assert_eq!(Balances::free_balance(&10), 10);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90);
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 10 },
+				]
+			);
+		});
+	}
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&40,
-					&mut del_40,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+	#[test]
+	fn rewards_distribution_is_fair_basic_with_fractions() {
+		// basically checks the case where the amount of rewards is less than the pool shares. for
+		// this, we have to rely on fixed point arithmetic.
+		ExtBuilder::default().build_and_execute(|| {
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 3).unwrap();
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }]
-				);
+			Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10);
+			assert_ok!(Pools::join(Origin::signed(20), 10, 1));
 
-				// Expect payout 40: (400 del virtual points / 900 pool points) * 90 pool balance
-				assert_eq!(del_40, del(40, 100));
-				assert_eq!(reward_pool, rew(50, 9_000 - 100 * 40, 100));
-				assert_eq!(Balances::free_balance(&40), 40);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50);
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 6).unwrap();
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&50,
-					&mut del_50,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			// 10 should claim 3 + 3, 20 should claim 3.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
-				);
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 3 + 3 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 3 },
+				]
+			);
 
-				// Expect payout 50: (50 del virtual points / 50 pool points) * 50 pool balance
-				assert_eq!(del_50, del(50, 100));
-				assert_eq!(reward_pool, rew(0, 0, 100));
-				assert_eq!(Balances::free_balance(&50), 50);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0);
+			// any upcoming rewards are shared equally.
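+			// (10 and 20 hold 10 points each, so the 8 below pays out 4 and 4.)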
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 8).unwrap();
 
-				// Given the reward pool has some new rewards
-				Balances::make_free_balance_be(&default_reward_account(), ed + 50);
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut del_10,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 4 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 4 },
+				]
+			);
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
-				);
+			// uneven upcoming rewards are shared equally, rounded down.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 7).unwrap();
 
-				// Expect payout 5: (500 del virtual points / 5,000 pool points) * 50 pool balance
-				assert_eq!(del_10, del(10, 150));
-				assert_eq!(reward_pool, rew(45, 5_000 - 50 * 10, 150));
-				assert_eq!(Balances::free_balance(&10), 10 + 5);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45);
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&40,
-					&mut del_40,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 3 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 3 },
+				]
+			);
+		});
+	}
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }]
+	#[test]
+	fn rewards_distribution_is_fair_3() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
 
-				// Expect payout 20: (2,000 del virtual points / 4,500 pool points) * 45 pool
-				// balance
-				assert_eq!(del_40, del(40, 150));
-				assert_eq!(reward_pool, rew(25, 4_500 - 50 * 40, 150));
-				assert_eq!(Balances::free_balance(&40), 40 + 20);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25);
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
 
-				// Given del 50 hasn't claimed and the reward pools has just earned 50
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75);
+			Balances::make_free_balance_be(&20, ed + 10);
+			assert_ok!(Pools::join(Origin::signed(20), 10, 1));
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&50,
-					&mut del_50,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap();
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
-				);
+			Balances::make_free_balance_be(&30, ed + 10);
+			assert_ok!(Pools::join(Origin::signed(30), 10, 1));
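+			// note the timeline: only 10 was in the pool for the first 30, 10 and 20
+			// share the 100, and all three share the 60 that follows.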
 
-				// We expect a payout of 50: (5,000 del virtual points / 7,5000 pool points) * 75
-				// pool balance
-				assert_eq!(del_50, del(50, 200));
-				assert_eq!(
-					reward_pool,
-					rew(
-						25,
-						// old pool points + points from new earnings - del points.
-						//
-						// points from new earnings = new earnings(50) * bonded_pool.points(100)
-						// del points = member.points(50) * new_earnings_since_last_claim (100)
-						(2_500 + 50 * 100) - 50 * 100,
-						200,
-					)
-				);
-				assert_eq!(Balances::free_balance(&50), 50 + 50);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25);
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut del_10,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			// 10 should claim 30 + 100 / 2 + 60 / 3, 20 should claim 100 / 2 + 60 / 3,
+			// and 30 should claim 60 / 3.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
 
-				// Then
-				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
-				);
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 30 + 100 / 2 + 60 / 3 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 100 / 2 + 60 / 3 },
+					Event::PaidOut { member: 30, pool_id: 1, payout: 60 / 3 },
+				]
+			);

-				// We expect a payout of 5
-				assert_eq!(del_10, del(10, 200));
-				assert_eq!(reward_pool, rew(20, 2_500 - 10 * 50, 200));
-				assert_eq!(Balances::free_balance(&10), 15 + 5);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 20);
+			// any upcoming rewards are shared equally.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
 
-				// Given del 40 hasn't claimed and the reward pool has just earned 400
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420);
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
 
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut del_10,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 30, pool_id: 1, payout: 10 },
+				]
+			);
+		});
+	}
 
-				// Then
+	#[test]
+	fn pending_rewards_per_member_works() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
+
+			assert_eq!(Pools::pending_rewards(10), Some(0));
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+			assert_eq!(Pools::pending_rewards(10), Some(30));
+			assert_eq!(Pools::pending_rewards(20), None);
+
+			Balances::make_free_balance_be(&20, ed + 10);
+			assert_ok!(Pools::join(Origin::signed(20), 10, 1));
+
+			assert_eq!(Pools::pending_rewards(10), Some(30));
+			assert_eq!(Pools::pending_rewards(20), Some(0));
+
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap();
+
+			assert_eq!(Pools::pending_rewards(10), Some(30 + 50));
+			assert_eq!(Pools::pending_rewards(20), Some(50));
+			assert_eq!(Pools::pending_rewards(30), None);
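+
+			// nb: as the assertions below show, `pending_rewards` only reports what a
+			// claim would pay out; a member's pending amount drops to zero once they
+			// actually claim, and non-members report `None`.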
+
+			Balances::make_free_balance_be(&30, ed + 10);
+			assert_ok!(Pools::join(Origin::signed(30), 10, 1));
+
+			assert_eq!(Pools::pending_rewards(10), Some(30 + 50));
+			assert_eq!(Pools::pending_rewards(20), Some(50));
+			assert_eq!(Pools::pending_rewards(30), Some(0));
+
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+			assert_eq!(Pools::pending_rewards(10), Some(30 + 50 + 20));
+			assert_eq!(Pools::pending_rewards(20), Some(50 + 20));
+			assert_eq!(Pools::pending_rewards(30), Some(20));
+
+			// each member's pending rewards reset to zero as they claim.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_eq!(Pools::pending_rewards(10), Some(0));
+			assert_eq!(Pools::pending_rewards(20), Some(50 + 20));
+			assert_eq!(Pools::pending_rewards(30), Some(20));
+
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_eq!(Pools::pending_rewards(10), Some(0));
+			assert_eq!(Pools::pending_rewards(20), Some(0));
+			assert_eq!(Pools::pending_rewards(30), Some(20));
+
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
+			assert_eq!(Pools::pending_rewards(10), Some(0));
+			assert_eq!(Pools::pending_rewards(20), Some(0));
+			assert_eq!(Pools::pending_rewards(30), Some(0));
+		});
+	}
+
+	#[test]
+	fn rewards_distribution_is_fair_bond_extra() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
+
+			Balances::make_free_balance_be(&20, ed + 20);
+			assert_ok!(Pools::join(Origin::signed(20), 20, 1));
+			Balances::make_free_balance_be(&30, ed + 20);
+			assert_ok!(Pools::join(Origin::signed(30), 10, 1));
+
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap();
+
+			// everyone claims.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 20 },
+					Event::PaidOut { member: 30, pool_id: 1, payout: 10 }
+				]
+			);
+
+			// 30 now bumps itself to be like 20.
+			assert_ok!(Pools::bond_extra(Origin::signed(30), BondExtra::FreeBalance(10)));
+
+			// more rewards come in.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap();
+
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: false },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 20 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 40 },
+					Event::PaidOut { member: 30, pool_id: 1, payout: 40 }
+				]
+			);
+		});
+	}
+
+	#[test]
+	fn rewards_distribution_is_fair_unbond() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
+
+			Balances::make_free_balance_be(&20, ed + 20);
+			assert_ok!(Pools::join(Origin::signed(20), 20, 1));
+
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+			// everyone claims.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 20 }
+				]
+			);
+
+			// 20 unbonds to be equal to 10 (10 points each).
+			assert_ok!(Pools::unbond(Origin::signed(20), 20, 10));
+
+			// more rewards come in.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap();
+
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 3 },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 50 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 50 },
+				]
+			);
+		});
+	}
+
+	#[test]
+	fn unclaimed_reward_is_safe() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
+
+			Balances::make_free_balance_be(&20, ed + 20);
+			assert_ok!(Pools::join(Origin::signed(20), 20, 1));
+			Balances::make_free_balance_be(&30, ed + 20);
+			assert_ok!(Pools::join(Origin::signed(30), 10, 1));
+
+			// 10 gets 10, 20 gets 20, 30 gets 10
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap();
+
+			// some claim.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 20 }
+				]
+			);
+
+			// 10 gets 20, 20 gets 40, 30 gets 20
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap();
+
+			// some claim.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 20 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 40 }
+				]
+			);
+
+			// 10 gets 20, 20 gets 40, 30 gets 20
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap();
+
+			// some claim.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 20 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 40 }
+				]
+			);
+
+			// now 30 claims all at once
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![Event::PaidOut { member: 30, pool_id: 1, payout: 10 + 20 + 20 }]
+			);
+		});
+	}
+
+	#[test]
+	fn bond_extra_and_delayed_claim() {
+		ExtBuilder::default().build_and_execute(|| {
+			let ed = Balances::minimum_balance();
+
+			Balances::make_free_balance_be(&20, ed + 200);
+			assert_ok!(Pools::join(Origin::signed(20), 20, 1));
+
+			// 10 gets 10, 20 gets 20
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+			// some claim.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 10 }
+				]
+			);
+
+			// 20 has not claimed yet, more reward comes
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+			// and 20 bonds more -- they should not have more share of this reward.
+			assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10)));
+
+			// everyone claims.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					// 20 + 40, which means the extra amount they bonded did not impact us.
+					Event::PaidOut { member: 20, pool_id: 1, payout: 60 },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: false },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 20 }
+				]
+			);
+
+			// but in the next round of rewards, the extra 10 they bonded has an impact.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+			// everyone claims.
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::PaidOut { member: 10, pool_id: 1, payout: 15 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 45 }
+				]
+			);
+		});
+	}
+
+	#[test]
+	fn create_sets_recorded_data() {
+		ExtBuilder::default().build_and_execute(|| {
+			MaxPools::<Runtime>::set(None);
+			// pool 1 (depositor 10) has already been created.
+			let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+			assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+			assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+			assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+			assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+
+			// transfer some reward to pool 1.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+			// create pool 2
+			Balances::make_free_balance_be(&20, 100);
+			assert_ok!(Pools::create(Origin::signed(20), 10, 20, 20, 20));
+
+			// has no impact -- initial
+			let (member_20, _, reward_pool_20) = Pools::get_member_with_pools(&20).unwrap();
+
+			assert_eq!(reward_pool_20.last_recorded_total_payouts, 0);
+			assert_eq!(reward_pool_20.total_rewards_claimed, 0);
+			assert_eq!(reward_pool_20.last_recorded_reward_counter, 0.into());
+
+			assert_eq!(member_20.last_recorded_reward_counter, 0.into());
+
+			// pre-fund the reward account of pool id 3 with some funds.
+			Balances::make_free_balance_be(&Pools::create_reward_account(3), 10);
+
+			// create pool 3
+			Balances::make_free_balance_be(&30, 100);
+			assert_ok!(Pools::create(Origin::signed(30), 10, 30, 30, 30));
+
+			// reward counter is still the same.
+			let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap();
+			assert_eq!(
+				Balances::free_balance(&Pools::create_reward_account(3)),
+				10 + Balances::minimum_balance()
+			);
+
+			assert_eq!(reward_pool_30.last_recorded_total_payouts, 0);
+			assert_eq!(reward_pool_30.total_rewards_claimed, 0);
+			assert_eq!(reward_pool_30.last_recorded_reward_counter, 0.into());
+
+			assert_eq!(member_30.last_recorded_reward_counter, 0.into());
+
+			// and 30 can claim the reward now.
+			assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Created { depositor: 20, pool_id: 2 },
+					Event::Bonded { member: 20, pool_id: 2, bonded: 10, joined: true },
+					Event::Created { depositor: 30, pool_id: 3 },
+					Event::Bonded { member: 30, pool_id: 3, bonded: 10, joined: true },
+					Event::PaidOut { member: 30, pool_id: 3, payout: 10 }
+				]
+			);
+		})
+	}
+
+	#[test]
+	fn join_updates_recorded_data() {
+		ExtBuilder::default().build_and_execute(|| {
+			MaxPoolMembers::<Runtime>::set(None);
+			MaxPoolMembersPerPool::<Runtime>::set(None);
+			let join = |x, y| {
+				Balances::make_free_balance_be(&x, y + Balances::minimum_balance());
+				assert_ok!(Pools::join(Origin::signed(x), y, 1));
+			};
+
+			{
+				let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+				assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+				assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+				assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+				assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+			}
+
+			// someone joins without any rewards being issued.
+			{
+				join(20, 10);
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+				// reward counter is 0 both before..
+				assert_eq!(member.last_recorded_reward_counter, 0.into());
+				assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+				assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+			}
+
+			// transfer some reward to pool 1.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+			{
+				join(30, 10);
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+				assert_eq!(reward_pool.last_recorded_total_payouts, 60);
+				// explanation: we have a total of 20 points so far (excluding the 10 that just got
+				// bonded), and 60 unclaimed rewards. each share is then roughly worth 3 units of
+				// rewards, thus reward counter is 3. member's reward counter is the same
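+				// (illustrative arithmetic: 60 rewards / 20 pre-existing points = 3.)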
+				assert_eq!(member.last_recorded_reward_counter, 3.into());
+				assert_eq!(reward_pool.last_recorded_reward_counter, 3.into());
+			}
+
+			// someone else joins
+			{
+				join(40, 10);
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&40).unwrap();
+				// reward counter does not change since no rewards have come in.
+				assert_eq!(member.last_recorded_reward_counter, 3.into());
+				assert_eq!(reward_pool.last_recorded_reward_counter, 3.into());
+				assert_eq!(reward_pool.last_recorded_total_payouts, 60);
+			}
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 40, pool_id: 1, bonded: 10, joined: true }
+				]
+			);
+		})
+	}
+
+	#[test]
+	fn bond_extra_updates_recorded_data() {
+		ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| {
+			MaxPoolMembers::<Runtime>::set(None);
+			MaxPoolMembersPerPool::<Runtime>::set(None);
+
+			// initial state of pool 1.
+			{
+				let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+				assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+				assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+				assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+				assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+			}
+
+			Balances::make_free_balance_be(&10, 100);
+			Balances::make_free_balance_be(&20, 100);
+
+			// 10 bonds extra without any rewards.
+			{
+				assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+				assert_eq!(member.last_recorded_reward_counter, 0.into());
+				assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+				assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+			}
+
+			// 10 bonds extra again with some rewards. This reward should be split equally between
+			// 10 and 20, as they both have equal points now.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+			{
+				assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+				// explanation: before bond_extra takes place, there are 40 points and 30 balance in
+				// the system, RewardCounter is therefore 0.75
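+				// (i.e. 30 rewards / 40 points = 0.75; 10's 20 points collect
+				// 20 * 0.75 = 15, the `PaidOut` event asserted further down.)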
+				assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75));
 				assert_eq!(
-					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 40 }]
+					reward_pool.last_recorded_reward_counter,
+					RewardCounter::from_float(0.75)
 				);
+				assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+			}
-
+			// 20 bonds extra again, without further rewards.
+			{
+				assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10)));
+				let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+				assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75));
+				assert_eq!(
+					reward_pool.last_recorded_reward_counter,
+					RewardCounter::from_float(0.75)
+				);
+				assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+			}
-				assert_eq!(Balances::free_balance(&10), 20 + 40);
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 380);
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 15 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 15 },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: false }
+				]
+			);
+		})
+	}
-
-				// Given del 40 + del 50 haven't claimed and the reward pool has earned 20
-				assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20));
-				assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400);
+
+	#[test]
+	fn unbond_updates_recorded_data() {
+		ExtBuilder::default()
+			.add_members(vec![(20, 20), (30, 20)])
+			.build_and_execute(|| {
+				MaxPoolMembers::<Runtime>::set(None);
+				MaxPoolMembersPerPool::<Runtime>::set(None);
+
+				// initial state of pool 1.
+				{
+					let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+
+					assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+					assert_eq!(reward_pool.total_rewards_claimed, 0);
+					assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+
+					assert_eq!(member.last_recorded_reward_counter, 0.into());
+				}
-
-				// When
-				assert_ok!(Pools::do_reward_payout(
-					&10,
-					&mut del_10,
-					&mut bonded_pool,
-					&mut reward_pool
-				));
+
+				// 20 unbonds without any rewards.
+				{
+					assert_ok!(Pools::unbond(Origin::signed(20), 20, 10));
+					let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+					assert_eq!(member.last_recorded_reward_counter, 0.into());
+					assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+					assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+				}
+
+				// some rewards come in.
+				Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+				// and 30 also unbonds half.
+				{
+					assert_ok!(Pools::unbond(Origin::signed(30), 30, 10));
+					let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+					// 30 reward in the system, and 40 points before this unbond to collect it,
+					// RewardCounter is 3/4.
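+					// (i.e. 30 / 40 = 0.75; 30's 20 points collect 20 * 0.75 = 15 as part
+					// of this unbond, per the `PaidOut` event below.)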
+					assert_eq!(
+						member.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+					assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+					assert_eq!(
+						reward_pool.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+				}
+
+				// 30 unbonds again, no change this time.
+				{
+					assert_ok!(Pools::unbond(Origin::signed(30), 30, 5));
+					let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+					assert_eq!(
+						member.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+					assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+					assert_eq!(
+						reward_pool.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+				}
+
+				// 20 unbonds again, no change this time, just collecting their reward.
+				{
+					assert_ok!(Pools::unbond(Origin::signed(20), 20, 5));
+					let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+					assert_eq!(
+						member.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+					assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+					assert_eq!(
+						reward_pool.last_recorded_reward_counter,
+						RewardCounter::from_float(0.75)
+					);
+				}
+
+				// trigger 10's reward as well to see all of the payouts.
+				assert_ok!(Pools::claim_payout(Origin::signed(10)));
-
-				// Then
 				assert_eq!(
 					pool_events_since_last_call(),
-					vec![Event::PaidOut { member: 10, pool_id: 1, payout: 2 }]
+					vec![
+						Event::Created { depositor: 10, pool_id: 1 },
+						Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+						Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+						Event::Bonded { member: 30, pool_id: 1, bonded: 20, joined: true },
+						Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 3 },
+						Event::PaidOut { member: 30, pool_id: 1, payout: 15 },
+						Event::Unbonded { member: 30, pool_id: 1, balance: 10, points: 10, era: 3 },
+						Event::Unbonded { member: 30, pool_id: 1, balance: 5, points: 5, era: 3 },
+						Event::PaidOut { member: 20, pool_id: 1, payout: 7 },
+						Event::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5, era: 3 },
+						Event::PaidOut { member: 10, pool_id: 1, payout: 7 }
+					]
+				);
+			})
+	}
+
+	#[test]
+	fn rewards_are_rounded_down_depositor_collects_them() {
+		ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| {
+			// initial balance of 10.
+
+			assert_eq!(Balances::free_balance(&10), 35);
+			assert_eq!(
+				Balances::free_balance(&default_reward_account()),
+				Balances::minimum_balance()
+			);
+
+			// some rewards come in.
+			Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap();
+
+			// everyone claims
+			assert_ok!(Pools::claim_payout(Origin::signed(10)));
+			assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+			// some dust (1) remains in the reward account.
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::Created { depositor: 10, pool_id: 1 },
+					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+					Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+					Event::PaidOut { member: 10, pool_id: 1, payout: 13 },
+					Event::PaidOut { member: 20, pool_id: 1, payout: 26 }
+				]
+			);
+
+			// start dismantling the pool.
+			assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying));
+			assert_ok!(fully_unbond_permissioned(20));
+
+			CurrentEra::set(3);
+			assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0));
+			assert_ok!(fully_unbond_permissioned(10));
+
+			CurrentEra::set(6);
+			assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0));
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					Event::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
+					Event::Unbonded { member: 20, pool_id: 1, balance: 20, points: 20, era: 3 },
+					Event::Withdrawn { member: 20, pool_id: 1, balance: 20, points: 20 },
+					Event::MemberRemoved { pool_id: 1, member: 20 },
+					Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 6 },
+					Event::Withdrawn { member: 10, pool_id: 1, balance: 10, points: 10 },
+					Event::MemberRemoved { pool_id: 1, member: 10 },
+					Event::Destroyed { pool_id: 1 }
+				]
+			);
+
+			// original ed + ed put into reward account + reward + bond + dust.
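+			// (35 initial + 5 ed refund + 13 reward + 10 unbonded + 1 dust = 64; the dust
+			// rounded away from the members' payouts ends up with the depositor.)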
+			assert_eq!(Balances::free_balance(&10), 35 + 5 + 13 + 10 + 1);
+		})
+	}
+
+	#[test]
+	fn claim_payout_large_numbers() {
+		let unit = 10u128.pow(12); // akin to KSM
+		ExistentialDeposit::set(unit);
+		StakingMinBond::set(unit * 1000);
+
+		ExtBuilder::default()
+			.max_members(Some(4))
+			.max_members_per_pool(Some(4))
+			.add_members(vec![(20, 1500 * unit), (21, 2500 * unit), (22, 5000 * unit)])
+			.build_and_execute(|| {
+				// some rewards come in.
+				assert_eq!(Balances::free_balance(&default_reward_account()), unit);
+				Balances::mutate_account(&default_reward_account(), |f| f.free += unit / 1000)
+					.unwrap();
+
+				// everyone claims
+				assert_ok!(Pools::claim_payout(Origin::signed(10)));
+				assert_ok!(Pools::claim_payout(Origin::signed(20)));
+				assert_ok!(Pools::claim_payout(Origin::signed(21)));
+				assert_ok!(Pools::claim_payout(Origin::signed(22)));
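+				// the payouts below are exactly proportional to bonded points: the
+				// unit / 1000 reward splits 10% / 15% / 25% / 50% across the
+				// 1000 / 1500 / 2500 / 5000 unit bonds.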
+ assert_noop!(
+ Pools::unbond(Origin::signed(kicker), 20, 5),
+ Error::::PartialUnbondNotAllowedPermissionlessly
);
- // Expect a payout of 2: (200 del virtual points / 38,000 pool points) * 400 pool
- // balance
- assert_eq!(del_10, del(10, 620));
- assert_eq!(reward_pool, rew(398, (38_000 + 20 * 100) - 10 * 20, 620));
- assert_eq!(Balances::free_balance(&10), 60 + 2);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398);
+ // cannot go to below 10:
+ assert_noop!(
+ Pools::unbond(Origin::signed(kicker), 20, 15),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // but they themselves can do an unbond
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, 2));
+ assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18);
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2);
+
+ // can be kicked to 0.
+ assert_ok!(Pools::unbond(Origin::signed(kicker), 20, 18));
+ assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0);
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20);
+ })
+ }
+
+ #[test]
+ fn member_unbond_destroying() {
+ // depositor in pool, pool state destroying
+ // - member cannot be permissionlessly unbonded to above the limit
+ // - member cannot be permissionlessly unbonded to a value between the limit and 0
+ // - member permissionlessly unbonded to 0
+ ExtBuilder::default()
+ .min_join_bond(10)
+ .add_members(vec![(20, 20)])
+ .build_and_execute(|| {
+ unsafe_set_state(1, PoolState::Destroying);
+ let random = 123;
+
+ // cannot be kicked to above the limit.
+ assert_noop!(
+ Pools::unbond(Origin::signed(random), 20, 5),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // cannot go to below 10:
+ assert_noop!(
+ Pools::unbond(Origin::signed(random), 20, 15),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // but they themselves can do an unbond
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, 2));
+ assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18);
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2);
+
+ // but can go to 0
+ assert_ok!(Pools::unbond(Origin::signed(random), 20, 18));
+ assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0);
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20);
+ })
+ }
+
+ #[test]
+ fn depositor_unbond_open() {
+ // depositor in pool, pool state open
+ // - depositor unbonds to above the limit
+ // - depositor cannot unbond to below the limit or to 0
+ ExtBuilder::default().min_join_bond(10).build_and_execute(|| {
+ // give the depositor some extra funds.
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ assert_eq!(PoolMembers::::get(10).unwrap().points, 20);
+
+ // can unbond to above the limit.
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 5));
+ assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15);
+ assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5);
+
+ // cannot go to below 10:
+ assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet);
+
+ // cannot go to 0 either.
+ assert_noop!(Pools::unbond(Origin::signed(10), 10, 15), Error::::MinimumBondNotMet);
+ })
+ }
+
+ #[test]
+ fn depositor_kick() {
+ // depositor in pool, pool state blocked
+ // - depositor can never be kicked.
+ ExtBuilder::default().min_join_bond(10).build_and_execute(|| {
+ // give the depositor some extra funds.
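+ // (bonding 10 extra on top of the 10 deposit gives 20 points, i.e. headroom above the minimum of 10.)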
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ assert_eq!(PoolMembers::::get(10).unwrap().points, 20);
+
+ // set the stage
+ unsafe_set_state(1, PoolState::Blocked);
+ let kicker = DEFAULT_ROLES.state_toggler.unwrap();
+
+ // cannot be kicked to above the limit.
+ assert_noop!(
+ Pools::unbond(Origin::signed(kicker), 10, 5),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // or below the limit
+ assert_noop!(
+ Pools::unbond(Origin::signed(kicker), 10, 15),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // or 0.
+ assert_noop!(
+ Pools::unbond(Origin::signed(kicker), 10, 20),
+ Error::::DoesNotHavePermission
+ );
+
+ // they themselves cannot do it either
+ assert_noop!(Pools::unbond(Origin::signed(10), 10, 20), Error::::MinimumBondNotMet);
+ })
+ }
+
+ #[test]
+ fn depositor_unbond_destroying_permissionless() {
+ // depositor can never be permissionlessly unbonded.
+ ExtBuilder::default().min_join_bond(10).build_and_execute(|| {
+ // give the depositor some extra funds.
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ assert_eq!(PoolMembers::::get(10).unwrap().points, 20);
+
+ // set the stage
+ unsafe_set_state(1, PoolState::Destroying);
+ let random = 123;
+
+ // cannot be kicked to above the limit.
+ assert_noop!(
+ Pools::unbond(Origin::signed(random), 10, 5),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // or below the limit
+ assert_noop!(
+ Pools::unbond(Origin::signed(random), 10, 15),
+ Error::::PartialUnbondNotAllowedPermissionlessly
+ );
+
+ // or 0.
+ assert_noop!(
+ Pools::unbond(Origin::signed(random), 10, 20),
+ Error::::DoesNotHavePermission
+ );
+
+ // they themselves can do it in this case though.
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 20));
+ })
+ }
+
+ #[test]
+ fn depositor_unbond_destroying_not_last_member() {
+ // depositor in pool, pool state destroying
+ // - depositor can never leave if there is another member in the pool.
+ ExtBuilder::default()
+ .min_join_bond(10)
+ .add_members(vec![(20, 20)])
+ .build_and_execute(|| {
+ // give the depositor some extra funds.
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ assert_eq!(PoolMembers::::get(10).unwrap().points, 20);
+
+ // set the stage
+ unsafe_set_state(1, PoolState::Destroying);
- // When
- assert_ok!(Pools::do_reward_payout(
- &40,
- &mut del_40,
- &mut bonded_pool,
- &mut reward_pool
- ));
+ // can go above the limit
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 5));
+ assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15);
+ assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5);
- // Then
- assert_eq!(
- pool_events_since_last_call(),
- vec![Event::PaidOut { member: 40, pool_id: 1, payout: 188 }]
+ // but not below the limit
+ assert_noop!(
+ Pools::unbond(Origin::signed(10), 10, 10),
+ Error::::MinimumBondNotMet
);
- // Expect a payout of 188: (18,800 del virtual points / 39,800 pool points) * 399
- // pool balance
- assert_eq!(del_40, del(40, 620));
- assert_eq!(reward_pool, rew(210, 39_800 - 40 * 470, 620));
- assert_eq!(Balances::free_balance(&40), 60 + 188);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210);
+ // and certainly not zero
+ assert_noop!(
+ Pools::unbond(Origin::signed(10), 10, 15),
+ Error::::MinimumBondNotMet
+ );
+ })
+ }
- // When
- assert_ok!(Pools::do_reward_payout(
- &50,
- &mut del_50,
- &mut bonded_pool,
- &mut reward_pool
- ));
+ #[test]
+ fn depositor_unbond_destroying_last_member() {
+ // depositor in pool, pool state destroying
+ // - depositor can unbond to above the limit always.
+ // - depositor cannot unbond to below the limit if last.
+ // - depositor can unbond to 0 if last and destroying.
+ ExtBuilder::default().min_join_bond(10).build_and_execute(|| {
+ // give the depositor some extra funds.
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ assert_eq!(PoolMembers::::get(10).unwrap().points, 20);
- // Then
- assert_eq!(
- pool_events_since_last_call(),
- vec![Event::PaidOut { member: 50, pool_id: 1, payout: 210 }]
- );
+ // set the stage
+ unsafe_set_state(1, PoolState::Destroying);
- // Expect payout of 210: (21,000 / 21,000) * 210
- assert_eq!(del_50, del(50, 620));
- assert_eq!(reward_pool, rew(0, 21_000 - 50 * 420, 620));
- assert_eq!(Balances::free_balance(&50), 100 + 210);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0);
- });
- }
-}
+ // can unbond to above the limit.
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 5));
+ assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15);
+ assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5);
-mod unbond {
- use super::*;
+ // still cannot go to below the limit
+ assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet);
+
+ // can go to 0 too.
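+ // (a full exit is allowed here only because the depositor is the last member and the pool is destroying.)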
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 15)); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); + assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 20); + }) + } #[test] fn unbond_of_1_works() { ExtBuilder::default().build_and_execute(|| { - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); assert_eq!( @@ -1624,7 +2322,7 @@ mod unbond { Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, Event::PaidOut { member: 40, pool_id: 1, payout: 40 }, - Event::Unbonded { member: 40, pool_id: 1, points: 6, balance: 6 } + Event::Unbonded { member: 40, pool_id: 1, points: 6, balance: 6, era: 3 } ] ); @@ -1636,7 +2334,7 @@ mod unbond { assert_eq!(Balances::free_balance(&40), 40 + 40); // We claim rewards when unbonding // When - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(550)); // Then @@ -1666,7 +2364,13 @@ mod unbond { pool_events_since_last_call(), vec![ Event::PaidOut { member: 550, pool_id: 1, payout: 550 }, - Event::Unbonded { member: 550, pool_id: 1, points: 92, balance: 92 } + Event::Unbonded { + member: 550, + pool_id: 1, + points: 92, + balance: 92, + era: 3 + } ] ); @@ -1704,7 +2408,7 @@ mod unbond { Event::Withdrawn { member: 550, pool_id: 1, points: 92, balance: 92 }, Event::MemberRemoved { pool_id: 1, member: 550 }, Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, - Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 } + Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2, era: 6 } ] ); }); @@ -1726,7 +2430,7 @@ mod unbond { }, }, ); - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // When let current_era = 1 + TotalUnbondingPools::::get(); @@ -1750,7 +2454,7 @@ mod unbond { vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10 } + Event::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 } ] ); }); @@ -1763,7 +2467,7 @@ mod unbond { .add_members(vec![(100, 100), (200, 200)]) .build_and_execute(|| { // Given - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); let bonded_pool = BondedPool::::get(1).unwrap(); assert_eq!(bonded_pool.roles.root.unwrap(), 900); assert_eq!(bonded_pool.roles.nominator.unwrap(), 901); @@ -1785,7 +2489,13 @@ mod unbond { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, Event::Bonded { member: 200, pool_id: 1, bonded: 200, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, + Event::Unbonded { + member: 100, + pool_id: 1, + points: 100, + balance: 100, + era: 3 + }, ] ); @@ -1794,7 +2504,13 @@ mod unbond { assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 200, pool_id: 1, points: 200, balance: 200 }] + vec![Event::Unbonded { + member: 200, + pool_id: 1, + points: 200, + balance: 200, + era: 3 + }] ); assert_eq!( @@ -1831,7 +2547,7 @@ mod unbond { // Scenarios where non-admin accounts can unbond others ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // Given the pool is blocked - unsafe_set_state(1, 
PoolState::Blocked).unwrap();
+ unsafe_set_state(1, PoolState::Blocked);

// A permissionless unbond attempt errors
assert_noop!(
@@ -1846,23 +2562,24 @@ mod unbond {
);

// Given the pool is destroying
- unsafe_set_state(1, PoolState::Destroying).unwrap();
+ unsafe_set_state(1, PoolState::Destroying);

// The depositor cannot be fully unbonded until they are the last member
assert_noop!(
Pools::fully_unbond(Origin::signed(10), 10),
- Error::::NotOnlyPoolMember
+ Error::::MinimumBondNotMet,
);

// Any account can unbond a member that is not the depositor
assert_ok!(Pools::fully_unbond(Origin::signed(420), 100));
+
assert_eq!(
pool_events_since_last_call(),
vec![
Event::Created { depositor: 10, pool_id: 1 },
Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true },
- Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }
+ Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100, era: 3 }
]
);

@@ -1873,7 +2590,7 @@ mod unbond {
);

// Given the pool is blocked
- unsafe_set_state(1, PoolState::Blocked).unwrap();
+ unsafe_set_state(1, PoolState::Blocked);

// The depositor cannot be unbonded
assert_noop!(
@@ -1882,7 +2599,7 @@ mod unbond {
);

// Given the pools is destroying
- unsafe_set_state(1, PoolState::Destroying).unwrap();
+ unsafe_set_state(1, PoolState::Destroying);

// The depositor cannot be unbonded yet.
assert_noop!(
@@ -1900,8 +2617,13 @@ mod unbond {
Error::::PartialUnbondNotAllowedPermissionlessly,
);

- // but full unbond works.
- assert_ok!(Pools::fully_unbond(Origin::signed(420), 10));
+ // the depositor can never be unbonded permissionlessly.
+ assert_noop!(
+ Pools::fully_unbond(Origin::signed(420), 10),
+ Error::::DoesNotHavePermission
+ );
+ // but the depositor themselves can do it.
+ assert_ok!(Pools::fully_unbond(Origin::signed(10), 10));

assert_eq!(BondedPools::::get(1).unwrap().points, 0);
assert_eq!(
@@ -1961,6 +2683,12 @@ mod unbond {
#[test]
fn partial_unbond_era_tracking() {
ExtBuilder::default().build_and_execute(|| {
+ // to make the depositor capable of withdrawing.
+ StakingMinBond::set(1);
+ MinCreateBond::::set(1);
+ MinJoinBond::::set(1);
+ assert_eq!(Pools::depositor_min_bond(), 1);
+
// given
assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 10);
assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0);
@@ -1975,7 +2703,7 @@ mod unbond {
assert_eq!(BondingDuration::get(), 3);

// so the depositor can leave, just keeps the test simpler.
- unsafe_set_state(1, PoolState::Destroying).unwrap();
+ unsafe_set_state(1, PoolState::Destroying);

// when: casual unbond
assert_ok!(Pools::unbond(Origin::signed(10), 10, 1));
@@ -2002,7 +2730,7 @@ mod unbond {
vec![
Event::Created { depositor: 10, pool_id: 1 },
Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
- Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 }
+ Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 3 }
]
);

@@ -2028,7 +2756,7 @@ mod unbond {
);
assert_eq!(
pool_events_since_last_call(),
- vec![Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 }]
+ vec![Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5, era: 3 }]
);

// when: casual further unbond, next era.
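The era bookkeeping exercised by `partial_unbond_era_tracking` reduces to a map from unlock era to points: every partial unbond schedules its points `BondingDuration` eras ahead, and unbonds that resolve to the same unlock era merge into a single chunk. A minimal, self-contained sketch of that merging rule, using a plain `BTreeMap` in place of the pallet's bounded `unbonding_eras` map (so this is an illustration of the semantics, not the pallet's actual data structure):

use std::collections::BTreeMap;

/// Unlock era -> points scheduled to unlock at that era.
/// (Sketch only: the pallet bounds this map by `MaxUnbonding`.)
type UnbondingEras = BTreeMap<u32, u128>;

/// A partial unbond matures `bonding_duration` eras from now; unbonds made
/// in the same era merge into one chunk.
fn unbond(chunks: &mut UnbondingEras, current_era: u32, bonding_duration: u32, points: u128) {
    *chunks.entry(current_era + bonding_duration).or_default() += points;
}

fn main() {
    let mut chunks = UnbondingEras::new();
    // era 0: unbonding 1 and then 5 merges into a single era-3 chunk,
    // mirroring the `member_unbonding_eras!(3 => 6)` assertion above.
    unbond(&mut chunks, 0, 3, 1);
    unbond(&mut chunks, 0, 3, 5);
    assert_eq!(chunks.get(&3), Some(&6));
    // era 1: a further unbond opens a separate era-4 chunk.
    unbond(&mut chunks, 1, 3, 1);
    assert_eq!(chunks.get(&4), Some(&1));
}

Under this model the assertions in the surrounding hunks fall out directly: chunks only merge within one era, and each new `CurrentEra` opens a new unlock slot until the chunk count hits `MaxUnbonding`, at which point further unbonds fail with `MaxUnbondingLimit` (as `partial_unbond_max_chunks` below verifies).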
@@ -2055,17 +2783,17 @@ mod unbond { ); assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 }] + vec![Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 4 }] ); // when: unbonding more than our active: error - assert_err!( - frame_support::storage::in_storage_layer(|| Pools::unbond( + assert_noop!( + frame_support::storage::with_storage_layer(|| Pools::unbond( Origin::signed(10), 10, 5 )), - Error::::NotEnoughPointsToUnbond + Error::::MinimumBondNotMet ); // instead: assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); @@ -2090,33 +2818,31 @@ mod unbond { ); assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }] + vec![Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3, era: 4 }] ); }); } #[test] fn partial_unbond_max_chunks() { - ExtBuilder::default().ed(1).build_and_execute(|| { - // so the depositor can leave, just keeps the test simpler. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + ExtBuilder::default().add_members(vec![(20, 20)]).ed(1).build_and_execute(|| { MaxUnbonding::set(2); // given - assert_ok!(Pools::unbond(Origin::signed(10), 10, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3) ); // when CurrentEra::set(2); - assert_err!( - frame_support::storage::in_storage_layer(|| Pools::unbond( - Origin::signed(10), - 10, + assert_noop!( + frame_support::storage::with_storage_layer(|| Pools::unbond( + Origin::signed(20), + 20, 4 )), Error::::MaxUnbondingLimit @@ -2124,30 +2850,35 @@ mod unbond { // when MaxUnbonding::set(3); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 1)); + assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3, 5 => 1) ); + assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }, - Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 } + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::Unbonded { member: 20, pool_id: 1, points: 2, balance: 2, era: 3 }, + Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3, era: 4 }, + Event::Unbonded { member: 20, pool_id: 1, points: 1, balance: 1, era: 5 } ] ); }) } - // depositor can unbond inly up to `MinCreateBond`. + // depositor can unbond only up to `MinCreateBond`. 
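+ // (more precisely, down to `Pools::depositor_min_bond`, which also respects the staking
+ // minimum; the next test raises that floor via `StakingMinBond::set(5)`.)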
#[test] fn depositor_permissioned_partial_unbond() { ExtBuilder::default().ed(1).build_and_execute(|| { // given - assert_eq!(MinCreateBond::::get(), 2); + StakingMinBond::set(5); + assert_eq!(Pools::depositor_min_bond(), 5); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 10); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0); @@ -2159,7 +2890,7 @@ mod unbond { // but not less than 2 assert_noop!( Pools::unbond(Origin::signed(10), 10, 6), - Error::::NotOnlyPoolMember + Error::::MinimumBondNotMet ); assert_eq!( @@ -2167,13 +2898,12 @@ mod unbond { vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 } + Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3, era: 3 } ] ); }); } - // same as above, but the pool is slashed and therefore the depositor cannot partially unbond. #[test] fn depositor_permissioned_partial_unbond_slashed() { ExtBuilder::default().ed(1).build_and_execute(|| { @@ -2188,78 +2918,69 @@ mod unbond { // cannot unbond even 7, because the value of shares is now less. assert_noop!( Pools::unbond(Origin::signed(10), 10, 7), - Error::::NotOnlyPoolMember - ); - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Created { depositor: 10, pool_id: 1 }, - Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - ] + Error::::MinimumBondNotMet ); }); } #[test] fn every_unbonding_triggers_payout() { - ExtBuilder::default().build_and_execute(|| { - let initial_reward_account = Balances::free_balance(Pools::create_reward_account(1)); + ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| { + let initial_reward_account = Balances::free_balance(default_reward_account()); assert_eq!(initial_reward_account, Balances::minimum_balance()); assert_eq!(initial_reward_account, 5); - // set the pool to destroying so that depositor can leave. - unsafe_set_state(1, PoolState::Destroying).unwrap(); - Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); assert_eq!( pool_events_since_last_call(), vec![ + // 2/3 of ed, which is 20's share. Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - // exactly equal to ed, all that can be claimed. - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 } + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::PaidOut { member: 20, pool_id: 1, payout: 10 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2, era: 3 } ] ); CurrentEra::set(1); Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( pool_events_since_last_call(), vec![ - // exactly equal to ed, all that can be claimed. - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 } + // 2/3 of ed, which is 20's share. 
+ Event::PaidOut { member: 20, pool_id: 1, payout: 6 }, + Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3, era: 4 } ] ); CurrentEra::set(2); Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); assert_eq!( pool_events_since_last_call(), vec![ - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 } + Event::PaidOut { member: 20, pool_id: 1, payout: 3 }, + Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 5 } ] ); assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3, 5 => 5) ); }); @@ -2353,7 +3074,6 @@ mod withdraw_unbonded { with_era: Default::default() } ); - assert_eq!( pool_events_since_last_call(), vec![ @@ -2361,8 +3081,14 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, - Event::Unbonded { member: 550, pool_id: 1, points: 550, balance: 550 }, - Event::Unbonded { member: 40, pool_id: 1, points: 40, balance: 40 }, + Event::Unbonded { + member: 550, + pool_id: 1, + points: 550, + balance: 550, + era: 3 + }, + Event::Unbonded { member: 40, pool_id: 1, points: 40, balance: 40, era: 3 }, ] ); assert_eq!( @@ -2416,7 +3142,7 @@ mod withdraw_unbonded { ); // now, finally, the depositor can take out its share. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); current_era += 3; @@ -2427,7 +3153,7 @@ mod withdraw_unbonded { assert_eq!( pool_events_since_last_call(), vec![ - Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5, era: 9 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 5, points: 5 }, Event::MemberRemoved { pool_id: 1, member: 10 }, Event::Destroyed { pool_id: 1 } @@ -2461,7 +3187,8 @@ mod withdraw_unbonded { assert_eq!( SubPoolsStorage::::get(&1).unwrap().with_era, - unbonding_pools_with_era! { 3 => UnbondPool { points: 550 / 2 + 40 / 2, balance: 550 / 2 + 40 / 2 }} + unbonding_pools_with_era! { 3 => UnbondPool { points: 550 / 2 + 40 / 2, balance: 550 / 2 + 40 / 2 + }} ); assert_eq!( @@ -2471,8 +3198,14 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, - Event::Unbonded { member: 40, pool_id: 1, balance: 20, points: 20 }, - Event::Unbonded { member: 550, pool_id: 1, balance: 275, points: 275 }, + Event::Unbonded { member: 40, pool_id: 1, balance: 20, points: 20, era: 3 }, + Event::Unbonded { + member: 550, + pool_id: 1, + balance: 275, + points: 275, + era: 3, + } ] ); assert_eq!( @@ -2525,7 +3258,7 @@ mod withdraw_unbonded { assert!(SubPoolsStorage::::get(&1).unwrap().with_era.is_empty()); // now, finally, the depositor can take out its share. 
- unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); // because everyone else has left, the points @@ -2540,7 +3273,7 @@ mod withdraw_unbonded { assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // then - assert_eq!(Balances::free_balance(&10), 10 + 5); + assert_eq!(Balances::free_balance(&10), 10 + 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 0); // in this test 10 also gets a fair share of the slash, because the slash was @@ -2548,7 +3281,7 @@ mod withdraw_unbonded { assert_eq!( pool_events_since_last_call(), vec![ - Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 }, + Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5, era: 6 }, Event::Withdrawn { member: 10, pool_id: 1, points: 5, balance: 5 }, Event::MemberRemoved { pool_id: 1, member: 10 }, Event::Destroyed { pool_id: 1 } @@ -2569,9 +3302,9 @@ mod withdraw_unbonded { ExtBuilder::default().build_and_execute(|| { // Given assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Balances::free_balance(&10), 5); + assert_eq!(Balances::free_balance(&10), 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); // Simulate a slash that is not accounted for in the sub pools. @@ -2588,7 +3321,7 @@ mod withdraw_unbonded { assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // Then - assert_eq!(Balances::free_balance(10), 10 + 5); + assert_eq!(Balances::free_balance(10), 10 + 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 0); }); } @@ -2662,13 +3395,25 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, Event::Bonded { member: 200, pool_id: 1, bonded: 200, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, - Event::Unbonded { member: 200, pool_id: 1, points: 200, balance: 200 } + Event::Unbonded { + member: 100, + pool_id: 1, + points: 100, + balance: 100, + era: 3 + }, + Event::Unbonded { + member: 200, + pool_id: 1, + points: 200, + balance: 200, + era: 3 + } ] ); // Given - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); // Cannot kick as a nominator assert_noop!( @@ -2726,7 +3471,7 @@ mod withdraw_unbonded { ); // Given - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // Can permissionlesly withdraw a member that is not the depositor assert_ok!(Pools::withdraw_unbonded(Origin::signed(420), 100, 0)); @@ -2740,7 +3485,7 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, + Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100, era: 3 }, Event::Withdrawn { member: 100, pool_id: 1, points: 100, balance: 100 }, Event::MemberRemoved { pool_id: 1, member: 100 } ] @@ -2751,8 +3496,8 @@ mod withdraw_unbonded { #[test] fn partial_withdraw_unbonded_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { - // so the depositor can leave, just keeps the test simpler. 
- unsafe_set_state(1, PoolState::Destroying).unwrap();
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ unsafe_set_state(1, PoolState::Destroying);

// given
assert_ok!(Pools::unbond(Origin::signed(10), 10, 6));
@@ -2772,15 +3517,16 @@
}
}
);
- assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 3);
+ assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 13);
assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 7);
assert_eq!(
pool_events_since_last_call(),
vec![
Event::Created { depositor: 10, pool_id: 1 },
Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
- Event::Unbonded { member: 10, pool_id: 1, points: 6, balance: 6 },
- Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 }
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false },
+ Event::Unbonded { member: 10, pool_id: 1, points: 6, balance: 6, era: 3 },
+ Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 4 }
]
);

@@ -2866,8 +3612,8 @@ mod withdraw_unbonded {
Event::Created { depositor: 10, pool_id: 1 },
Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
Event::Bonded { member: 11, pool_id: 1, bonded: 10, joined: true },
- Event::Unbonded { member: 11, pool_id: 1, points: 6, balance: 6 },
- Event::Unbonded { member: 11, pool_id: 1, points: 1, balance: 1 }
+ Event::Unbonded { member: 11, pool_id: 1, points: 6, balance: 6, era: 3 },
+ Event::Unbonded { member: 11, pool_id: 1, points: 1, balance: 1, era: 4 }
]
);

@@ -2956,8 +3702,8 @@ mod withdraw_unbonded {
Event::Created { depositor: 10, pool_id: 1 },
Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true },
- Event::Unbonded { member: 100, pool_id: 1, points: 75, balance: 75 },
- Event::Unbonded { member: 100, pool_id: 1, points: 25, balance: 25 },
+ Event::Unbonded { member: 100, pool_id: 1, points: 75, balance: 75, era: 3 },
+ Event::Unbonded { member: 100, pool_id: 1, points: 25, balance: 25, era: 4 },
Event::Withdrawn { member: 100, pool_id: 1, points: 75, balance: 75 },
]
);
@@ -2982,50 +3728,72 @@ mod withdraw_unbonded {
#[test]
fn full_multi_step_withdrawing_depositor() {
ExtBuilder::default().ed(1).build_and_execute(|| {
- // given
+ // depositor now has 20, they can unbond to 10.
+ assert_eq!(Pools::depositor_min_bond(), 10);
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+
+ // now they can.
assert_ok!(Pools::unbond(Origin::signed(10), 10, 7));

// progress one era and unbond the leftover.
CurrentEra::set(1);
- unsafe_set_state(1, PoolState::Destroying).unwrap();
assert_ok!(Pools::unbond(Origin::signed(10), 10, 3));
+
assert_eq!(
PoolMembers::::get(10).unwrap().unbonding_eras,
member_unbonding_eras!(3 => 7, 4 => 3)
);
+
+ // they can't unbond to a value below 10 other than 0.
assert_noop!(
- Pools::withdraw_unbonded(Origin::signed(10), 10, 0),
- Error::::CannotWithdrawAny
+ Pools::unbond(Origin::signed(10), 10, 5),
+ Error::::MinimumBondNotMet
+ );
+
+ // and not even in full, because the pool is not yet destroying.
+ assert_noop!(
+ Pools::unbond(Origin::signed(10), 10, 10),
+ Error::::MinimumBondNotMet
+ );
+
+ // once the pool is destroying, a full unbond works (partial still does not).
+ unsafe_set_state(1, PoolState::Destroying);
+ assert_noop!(
+ Pools::unbond(Origin::signed(10), 10, 5),
+ Error::::MinimumBondNotMet
);
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 10));

// now the 7 should be free.
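+ // (the era-0 chunk of 7 unlocks at era 3; the 3 and the 10 unbonded in era 1 merged
+ // into a single era-4 chunk of 13.)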
CurrentEra::set(3); assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 7, balance: 7 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }, - Event::Withdrawn { member: 10, pool_id: 1, points: 7, balance: 7 } + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false }, + Event::Unbonded { member: 10, pool_id: 1, balance: 7, points: 7, era: 3 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 3, points: 3, era: 4 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, + Event::Withdrawn { member: 10, pool_id: 1, balance: 7, points: 7 } ] ); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, - member_unbonding_eras!(4 => 3) + member_unbonding_eras!(4 => 13) ); - // the 25 should be free now, and the member removed. + // the 13 should be free now, and the member removed. CurrentEra::set(4); assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_eq!( pool_events_since_last_call(), vec![ - Event::Withdrawn { member: 10, pool_id: 1, points: 3, balance: 3 }, + Event::Withdrawn { member: 10, pool_id: 1, points: 13, balance: 13 }, Event::MemberRemoved { pool_id: 1, member: 10 }, - // the pool is also destroyed now. Event::Destroyed { pool_id: 1 }, ] ); @@ -3089,11 +3857,7 @@ mod create { ); assert_eq!( RewardPools::::get(2).unwrap(), - RewardPool { - balance: Zero::zero(), - points: U256::zero(), - total_earnings: Zero::zero(), - } + RewardPool { ..Default::default() } ); assert_eq!( @@ -3258,7 +4022,7 @@ mod set_state { // If the pool is not ok to be open, then anyone can set it to destroying // Given - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); let mut bonded_pool = BondedPool::::get(1).unwrap(); bonded_pool.points = 100; bonded_pool.put(); @@ -3269,7 +4033,7 @@ mod set_state { // Given Balances::make_free_balance_be(&default_bonded_account(), Balance::max_value() / 10); - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); // When assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); // Then @@ -3277,7 +4041,7 @@ mod set_state { // If the pool is not ok to be open, it cannot be permissionleslly set to a state that // isn't destroying - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); assert_noop!( Pools::set_state(Origin::signed(11), 1, PoolState::Blocked), Error::::CanNotChangeState @@ -3438,13 +4202,13 @@ mod bond_extra { // given assert_eq!(PoolMembers::::get(10).unwrap().points, 10); assert_eq!(BondedPools::::get(1).unwrap().points, 10); - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); // when assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); // then - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); assert_eq!(PoolMembers::::get(10).unwrap().points, 10 + claimable_reward); assert_eq!(BondedPools::::get(1).unwrap().points, 10 + claimable_reward); @@ -3480,14 +4244,14 @@ mod bond_extra { assert_eq!(PoolMembers::::get(10).unwrap().points, 10); assert_eq!(PoolMembers::::get(20).unwrap().points, 20); assert_eq!(BondedPools::::get(1).unwrap().points, 30); - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); 
assert_eq!(Balances::free_balance(20), 20);

// when
assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards));

// then
- assert_eq!(Balances::free_balance(10), 5);
+ assert_eq!(Balances::free_balance(10), 35);
// 10's share of the reward is 1/3, since they gave 10/30 of the total shares.
assert_eq!(PoolMembers::::get(10).unwrap().points, 10 + 1);
assert_eq!(BondedPools::::get(1).unwrap().points, 30 + 1);
@@ -3688,3 +4452,310 @@ mod update_roles {
})
}
}
+
+mod reward_counter_precision {
+ use sp_runtime::FixedU128;
+
+ use super::*;
+
+ const DOT: Balance = 10u128.pow(10u32);
+ const POLKADOT_TOTAL_ISSUANCE_GENESIS: Balance = DOT * 10u128.pow(9u32);
+
+ const fn inflation(years: u128) -> u128 {
+ let mut i = 0;
+ let mut start = POLKADOT_TOTAL_ISSUANCE_GENESIS;
+ while i < years {
+ start = start + start / 10;
+ i += 1
+ }
+ start
+ }
+
+ fn default_pool_reward_counter() -> FixedU128 {
+ RewardPools::::get(1)
+ .unwrap()
+ .current_reward_counter(1, BondedPools::::get(1).unwrap().points)
+ .unwrap()
+ }
+
+ fn pending_rewards(of: AccountId) -> Option> {
+ let member = PoolMembers::::get(of).unwrap();
+ assert_eq!(member.pool_id, 1);
+ let rc = default_pool_reward_counter();
+ member.pending_rewards(rc).ok()
+ }
+
+ #[test]
+ fn smallest_claimable_reward() {
+ // create a pool that has all of the polkadot issuance in 50 years.
+ let pool_bond = inflation(50);
+ ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 1173908528796953165005,
+ joined: true,
+ }
+ ]
+ );
+
+ // the smallest reward that this pool can handle is
+ let expected_smallest_reward = inflation(50) / 10u128.pow(18);
+
+ // tad bit less. cannot be paid out.
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ expected_smallest_reward - 1));
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(pool_events_since_last_call(), vec![]);
+ // revert it.
+
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -=
+ expected_smallest_reward - 1));
+
+ // tad bit more. can be claimed.
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ expected_smallest_reward + 1));
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 1173 }]
+ );
+ })
+ }
+
+ #[test]
+ fn reward_counter_calc_wont_fail_in_normal_polkadot_future() {
+ // create a pool that has roughly half of the polkadot issuance in 10 years.
+ let pool_bond = inflation(10) / 2;
+ ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true,
+ }
+ ]
+ );
+
+ // in 10 years, the total claimed rewards are large values as well. assuming that a pool
+ // is earning all of the inflation per year (which is really unrealistic, but worst
+ // case), that will be:
+ let pool_total_earnings_10_years = inflation(10) - POLKADOT_TOTAL_ISSUANCE_GENESIS;
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ pool_total_earnings_10_years));
+
+ // some whale now joins with the other half of the total issuance.
+ // This will bloat all the calculation regarding the current reward counter.
+ Balances::make_free_balance_be(&20, pool_bond * 2);
+ assert_ok!(Pools::join(Origin::signed(20), pool_bond, 1));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true
+ }]
+ );
+
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 15937424600999999996 }]
+ );
+
+ // now let a small member join with 10 DOTs.
+ Balances::make_free_balance_be(&30, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(30), 10 * DOT, 1));
+
+ // and give a reasonably small reward to the pool.
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += DOT));
+
+ assert_ok!(Pools::claim_payout(Origin::signed(30)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded { member: 30, pool_id: 1, bonded: 100000000000, joined: true },
+ // quite small, but working fine.
+ Event::PaidOut { member: 30, pool_id: 1, payout: 38 }
+ ]
+ );
+ })
+ }
+
+ #[test]
+ fn reward_counter_update_can_fail_if_pool_is_highly_slashed() {
+ // create a pool that has roughly half of the polkadot issuance in 10 years.
+ let pool_bond = inflation(10) / 2;
+ ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true,
+ }
+ ]
+ );
+
+ // slash this pool by 99% of that.
+ StakingMock::set_bonded_balance(default_bonded_account(), DOT + pool_bond / 100);
+
+ // some whale now joins with the other half of the total issuance. This will trigger an
+ // overflow. This test is actually a bit too lenient because all the reward counters are
+ // set to zero. In other tests where we want to assert that a scenario won't fail, we
+ // should also set the reward counters to some large value.
+ Balances::make_free_balance_be(&20, pool_bond * 2);
+ assert_err!(Pools::join(Origin::signed(20), pool_bond, 1), Error::::OverflowRisk);
+ })
+ }
+
+ #[test]
+ fn if_small_member_waits_long_enough_they_will_earn_rewards() {
+ // create a pool that has a quarter of the current polkadot issuance
+ ExtBuilder::default()
+ .ed(DOT)
+ .min_bond(POLKADOT_TOTAL_ISSUANCE_GENESIS / 4)
+ .build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 2500000000000000000,
+ joined: true,
+ }
+ ]
+ );
+
+ // and have a tiny fish join the pool as well.
+ Balances::make_free_balance_be(&20, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1));
+
+ // earn some small rewards
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+
+ // no point in claiming for 20 (nonetheless, it should be harmless)
+ assert!(pending_rewards(20).unwrap().is_zero());
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 100000000000,
+ joined: true
+ },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 9999997 }
+ ]
+ );
+
+ // earn a little more; still nothing can be claimed for 20, but 10 claims their
+ // share.
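+ // (20 holds 10 DOT of a pool of roughly 2.5 * 10^18 plancks, so each DOT / 1000 reward
+ // is worth 10^7 * 10^11 / (2.5 * 10^18) = 0.4 plancks to them; only after the third
+ // such reward does their pending reward cross a whole planck.)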
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+ assert!(pending_rewards(20).unwrap().is_zero());
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 }]
+ );
+
+ // earn some more rewards, this time 20 can also claim.
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+ assert_eq!(pending_rewards(20).unwrap(), 1);
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 1 }
+ ]
+ );
+ });
+ }
+
+ #[test]
+ fn zero_reward_claim_does_not_update_reward_counter() {
+ // create a pool that has a quarter of the current polkadot issuance
+ ExtBuilder::default()
+ .ed(DOT)
+ .min_bond(POLKADOT_TOTAL_ISSUANCE_GENESIS / 4)
+ .build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 2500000000000000000,
+ joined: true,
+ }
+ ]
+ );
+
+ // and have a tiny fish join the pool as well.
+ Balances::make_free_balance_be(&20, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1));
+
+ // earn some small rewards
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+
+ // if 20 claims now, their reward counter should stay the same, so that they have a
+ // chance of claiming this if they let it accumulate. Also see
+ // `if_small_member_waits_long_enough_they_will_earn_rewards`
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 100000000000,
+ joined: true
+ },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 9999997 }
+ ]
+ );
+
+ let current_reward_counter = default_pool_reward_counter();
+ // has been updated, because they actually claimed something.
+ assert_eq!(
+ PoolMembers::::get(10).unwrap().last_recorded_reward_counter,
+ current_reward_counter
+ );
+ // has not been updated, even though the claim transaction went through okay.
+ assert_eq!(
+ PoolMembers::::get(20).unwrap().last_recorded_reward_counter,
+ Default::default()
+ );
+ });
+ }
+}
diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs
index 8e3facfc5ec26..1f0d2ce8cddc4 100644
--- a/frame/nomination-pools/src/weights.rs
+++ b/frame/nomination-pools/src/weights.rs
@@ -18,7 +18,7 @@
//! Autogenerated weights for pallet_nomination_pools
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2022-06-10, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2022-06-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
//!
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_nomination_pools. @@ -70,7 +70,7 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) - // Storage: NominationPools RewardPools (r:1 w:0) + // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) @@ -80,23 +80,23 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - (124_508_000 as Weight) - .saturating_add(T::DbWeight::get().reads(17 as Weight)) - .saturating_add(T::DbWeight::get().writes(11 as Weight)) + Weight::from_ref_time(123_947_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(17 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:3 w:2) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - (115_185_000 as Weight) - .saturating_add(T::DbWeight::get().reads(13 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(118_236_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -108,18 +108,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - (132_723_000 as Weight) - .saturating_add(T::DbWeight::get().reads(14 as Weight)) - .saturating_add(T::DbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(132_475_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (52_498_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_299_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -136,9 +136,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools 
CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_645_000 as Weight) - .saturating_add(T::DbWeight::get().reads(18 as Weight)) - .saturating_add(T::DbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(121_254_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(18 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -146,11 +146,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (43_320_000 as Weight) + Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -162,11 +162,11 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (83_195_000 as Weight) - // Standard Error: 5_000 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(81_611_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -189,9 +189,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (143_495_000 as Weight) - .saturating_add(T::DbWeight::get().reads(19 as Weight)) - .saturating_add(T::DbWeight::get().writes(16 as Weight)) + Weight::from_ref_time(139_849_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(19 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(16 as RefTimeWeight)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -216,9 +216,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (127_998_000 as Weight) - .saturating_add(T::DbWeight::get().reads(22 as Weight)) - .saturating_add(T::DbWeight::get().writes(15 as Weight)) + Weight::from_ref_time(126_246_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(22 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(15 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -234,30 +234,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - (49_929_000 as Weight) - // Standard Error: 16_000 - .saturating_add((2_319_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_829_000 as RefTimeWeight) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (27_399_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_761_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - (14_813_000 as Weight) + Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -265,14 +265,14 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_115_000 as Weight) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(6_173_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_546_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_261_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -283,9 +283,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (48_243_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(47_959_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -295,7 +295,7 @@ impl WeightInfo for () { // Storage: NominationPools PoolMembers (r:1 w:1) 
// Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) - // Storage: NominationPools RewardPools (r:1 w:0) + // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) @@ -305,23 +305,23 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - (124_508_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(17 as Weight)) - .saturating_add(RocksDbWeight::get().writes(11 as Weight)) + Weight::from_ref_time(123_947_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(17 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:3 w:2) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - (115_185_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(13 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(118_236_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -333,18 +333,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - (132_723_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(14 as Weight)) - .saturating_add(RocksDbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(132_475_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (52_498_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_299_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -361,9 +361,9 @@ impl WeightInfo for () { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_645_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(18 as Weight)) - .saturating_add(RocksDbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(121_254_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(18 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -371,11 +371,11 @@ impl WeightInfo for () { // Storage: Balances 
Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (43_320_000 as Weight) + Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -387,11 +387,11 @@ impl WeightInfo for () { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (83_195_000 as Weight) - // Standard Error: 5_000 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(81_611_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -414,9 +414,9 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (143_495_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(19 as Weight)) - .saturating_add(RocksDbWeight::get().writes(16 as Weight)) + Weight::from_ref_time(139_849_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(19 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(16 as RefTimeWeight)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -441,9 +441,9 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (127_998_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(22 as Weight)) - .saturating_add(RocksDbWeight::get().writes(15 as Weight)) + Weight::from_ref_time(126_246_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(22 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(15 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -459,30 +459,30 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - (49_929_000 as Weight) - // Standard Error: 16_000 - .saturating_add((2_319_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_829_000 as RefTimeWeight) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (27_399_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_761_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - (14_813_000 as Weight) + Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -490,14 +490,14 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_115_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(6_173_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_546_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_261_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -508,8 +508,8 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (48_243_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(47_959_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } } diff --git a/frame/nomination-pools/test-staking/src/lib.rs 
b/frame/nomination-pools/test-staking/src/lib.rs index 2e40e8c6d917d..5a7cd494362ca 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -22,9 +22,11 @@ mod mock; use frame_support::{assert_noop, assert_ok, bounded_btree_map, traits::Currency}; use mock::*; use pallet_nomination_pools::{ - Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, PoolState, + BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, + PoolState, }; use pallet_staking::{CurrentEra, Event as StakingEvent, Payee, RewardDestination}; +use sp_runtime::traits::Zero; #[test] fn pool_lifecycle_e2e() { @@ -70,7 +72,7 @@ fn pool_lifecycle_e2e() { // depositor cannot unbond yet. assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); // now the members want to unbond. @@ -93,15 +95,15 @@ fn pool_lifecycle_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10, era: 3 }, + PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10, era: 3 }, ] ); // depositor cannot still unbond assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); for e in 1..BondingDuration::get() { @@ -118,7 +120,7 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); // but members can now withdraw. @@ -157,7 +159,7 @@ fn pool_lifecycle_e2e() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50 }] + vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50, era: 6 }] ); // waiting another bonding duration: @@ -235,8 +237,8 @@ fn pool_slash_e2e() { assert_eq!( pool_events_since_last_call(), vec![ - PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 } + PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 4 } ] ); @@ -260,9 +262,9 @@ fn pool_slash_e2e() { assert_eq!( pool_events_since_last_call(), vec![ - PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10 }, + PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 5 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 5 }, + PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 5 }, ] ); @@ -273,7 +275,7 @@ fn pool_slash_e2e() { 30, &mut Default::default(), &mut Default::default(), - 1, // slash era 1, affects chunks at era 5 onwards. + 2, // slash era 2, affects chunks at era 5 onwards. 
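+ // (Note: `BondingDuration` is 3 in this mock, and a chunk is only expected to be hit if it
+ // was still bonded in the slash era, i.e. if its unlock era is at least 2 + 3 = 5; the
+ // era-4 chunks created by the earlier unbonds escape.)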
);
assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]);
@@ -295,7 +297,7 @@ fn pool_slash_e2e() {
PoolMember {
pool_id: 1,
points: 0,
- reward_pool_total_earnings: 0,
+ last_recorded_reward_counter: Zero::zero(),
// the 10 points unlocked just now correspond to 5 points in the unbond pool.
unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5)
}
@@ -303,7 +305,7 @@
assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]);
assert_eq!(
pool_events_since_last_call(),
- vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5 }]
+ vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }]
);
// now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free.
@@ -340,7 +342,7 @@ fn pool_slash_e2e() {
pool_events_since_last_call(),
vec![
PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
- PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10 }
+ PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 }
]
);
@@ -350,7 +352,7 @@
PoolMember {
pool_id: 1,
points: 0,
- reward_pool_total_earnings: 0,
+ last_recorded_reward_counter: Zero::zero(),
unbonding_eras: bounded_btree_map!(4 => 10, 5 => 10, 9 => 10)
}
);
@@ -371,3 +373,284 @@ fn pool_slash_e2e() {
);
});
}
+
+#[test]
+fn pool_slash_proportional() {
+ // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that
+ // happened in era 100 should only affect the latter two.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(LastPoolId::::get(), 1);
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have three members join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_ok!(Pools::join(Origin::signed(21), bond, 1));
+ assert_ok!(Pools::join(Origin::signed(22), bond, 1));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true },
+ PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: bond, joined: true },
+ PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: bond, joined: true },
+ ]
+ );
+
+ // now let's progress a lot.
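+ // (With `BondingDuration` set to 28 above, an unbond requested in era E becomes
+ // withdrawable at era E + 28, which is why the `Unbonded` events below carry the
+ // unlock eras 127, 128 and 129.)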
+ CurrentEra::::set(Some(99));
+
+ // and unbond
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 20,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 127
+ }]
+ );
+
+ CurrentEra::::set(Some(100));
+ assert_ok!(Pools::unbond(Origin::signed(21), 21, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 21,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 128
+ }]
+ );
+
+ CurrentEra::::set(Some(101));
+ assert_ok!(Pools::unbond(Origin::signed(22), 22, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 22,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 129
+ }]
+ );
+
+ // Apply a slash that happened in era 100. This is typically applied with a delay.
+ // Of the total 100, 50 is slashed.
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 50,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // This pool got slashed only by the leftover rounding dust. In principle, this
+ // chunk/pool should not have been affected at all.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 19 },
+ // This pool got slashed 12.5, which rounded down to 12.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 8 },
+ // This pool got slashed 12.5, which rounded down to 12.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 },
+ // Bonded pool got slashed for 25, remaining 15 in it.
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+ ]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_only_bonded_pool() {
+ // A typical example where a pool member unbonds in era 99, and can get away with a slash
+ // that happened in era 100, as long as the pool has enough active bond to cover the slash. If
+ // everything else in the slashing/staking system works, this should always be the case.
+ // Nonetheless, `ledger.slash` has been written such that it will slash greedily from any chunk
+ // if it runs out of chunks that it thinks should be affected by the slash.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
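+ // (As a reminder, `create` takes the initial deposit followed by the root, nominator and
+ // state-toggler accounts; here account 10 bonds 40 and also fills all three roles itself.)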
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have one member join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 20,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 127
+ }]
+ );
+
+ // slash for 30. This will be deducted only from the bonded pool.
+ CurrentEra::::set(Some(100));
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 30,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_bonded_pool_and_chunks() {
+ // An uncommon example where even though some funds are unlocked such that they should not be
+ // affected by a slash, we still slash out of them. This should not happen at all. If a
+ // nomination has unbonded, from the next era onwards, their exposure will drop, so if a slash
+ // happens in that era, then their share of that slash should naturally be less, such that
+ // their active ledger stake alone is enough to compensate for it.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have one member join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 20,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 127
+ }]
+ );
+
+ // slash 50. This will be deducted only from the bonded pool and one of the unbonding pools.
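+ // (The bonded pool holds only 40, so a slash of 50 wipes it to 0 and the remaining 10 is
+ // taken greedily from the era-127 unbonding chunk, leaving 10 in it, as the events below
+ // confirm.)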
+ CurrentEra::::set(Some(100)); + assert_eq!(BondedPools::::get(1).unwrap().points, 40); + pallet_staking::slashing::do_slash::( + &POOL1_BONDED, + 50, + &mut Default::default(), + &mut Default::default(), + 100, + ); + + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + pool_events_since_last_call(), + vec![ + // out of 20, 10 was taken. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 10 }, + // out of 40, all was taken. + PoolsEvent::PoolSlashed { pool_id: 1, balance: 0 } + ] + ); + }); +} diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 7b720c009b29b..055ba7b4b3c06 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -16,14 +16,25 @@ // limitations under the License. use frame_election_provider_support::VoteWeight; -use frame_support::{assert_ok, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; -use sp_runtime::traits::{Convert, IdentityLookup}; +use frame_support::{ + assert_ok, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, ConstU8}, + PalletId, +}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + FixedU128, +}; type AccountId = u128; type AccountIndex = u32; type BlockNumber = u64; type Balance = u128; +pub(crate) type T = Runtime; + pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128; pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; @@ -157,24 +168,18 @@ impl pallet_nomination_pools::Config for Runtime { type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; - type MinPointsToBalance = ConstU32<10>; + type MaxPointsToBalance = ConstU8<10>; type PalletId = PoolsPalletId; } -impl frame_system::offchain::SendTransactionTypes for Runtime -where - Call: From, -{ - type OverarchingCall = Call; - type Extrinsic = UncheckedExtrinsic; -} - type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -194,13 +199,14 @@ frame_support::construct_runtime!( ); pub fn new_test_ext() -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); let _ = pallet_nomination_pools::GenesisConfig:: { min_join_bond: 2, min_create_bond: 2, max_pools: Some(3), - max_members_per_pool: Some(3), - max_members: Some(3 * 3), + max_members_per_pool: Some(5), + max_members: Some(3 * 5), } .assimilate_storage(&mut storage) .unwrap(); diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 98c6390964d82..5d81a71d4c47c 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -288,15 +288,13 @@ benchmarks! 
{ let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; - - let slash_fraction = UnresponsivenessOffence::::slash_fraction( - offenders.len() as u32, validator_set_count, - ); + let offenders_count = offenders.len() as u32; let offence = UnresponsivenessOffence { session_index: 0, validator_set_count, offenders, }; + let slash_fraction = offence.slash_fraction(offenders_count); assert_eq!(System::::event_count(), 0); }: { let _ = ::ReportUnresponsiveness::report_offence( @@ -307,7 +305,7 @@ benchmarks! { verify { let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); let slash_amount = slash_fraction * bond_amount; - let reward_amount = slash_amount * (1 + n) / 2; + let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index d51a81b1212c0..312bc4e18f413 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -40,7 +40,9 @@ type Balance = u64; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + 2 * WEIGHT_PER_SECOND + ); } impl frame_system::Config for Test { diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index e4b75d9c3c015..ae454d6b06740 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -120,7 +120,6 @@ where fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { let offenders = offence.offenders(); let time_slot = offence.time_slot(); - let validator_set_count = offence.validator_set_count(); // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. @@ -134,7 +133,7 @@ where let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed - let new_fraction = O::slash_fraction(offenders_count, validator_set_count); + let new_fraction = offence.slash_fraction(offenders_count); let slash_perbill: Vec<_> = (0..concurrent_offenders.len()).map(|_| new_fraction).collect(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index b3dfbdd90b19d..d9ecf44ad8734 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -67,7 +67,7 @@ impl offence::OnOffenceHandler } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| f(&mut *fractions.borrow_mut())) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut fractions.borrow_mut())) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -168,8 +168,8 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { - Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) + fn slash_fraction(&self, offenders_count: u32) -> Perbill { + Perbill::from_percent(5 + offenders_count * 100 / self.validator_set_count) } } diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 5f557e08c3e5b..20393bf42281f 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = Everything; diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index de3eb6607fe8c..183b704ec705d 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_preimage. @@ -64,87 +64,87 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - (44_380_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(44_380_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - (30_280_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_280_000 as RefTimeWeight) + 
.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - (42_809_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_809_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - (28_964_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(28_964_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - (17_555_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_555_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - (7_745_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_745_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - (29_758_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_758_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - (18_360_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(18_360_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - (7_439_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_439_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -153,86 +153,86 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as 
RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - (44_380_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(44_380_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - (30_280_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_280_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - (42_809_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_809_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - (28_964_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(28_964_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - (17_555_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_555_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) 
+ .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - (7_745_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_745_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - (29_758_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_758_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - (18_360_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(18_360_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - (7_439_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_439_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 87017290a3ab9..c01a8da000c09 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -35,9 +35,11 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), let caller = maybe_who.unwrap_or_else(whitelisted_caller); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); for i in 0..n { + let real = T::Lookup::unlookup(account("target", i, SEED)); + Proxy::::add_proxy( RawOrigin::Signed(caller.clone()).into(), - account("target", i, SEED), + real, T::ProxyType::default(), T::BlockNumber::zero(), )?; @@ -51,6 +53,7 @@ fn add_announcements( maybe_real: Option, ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); let real = if let Some(real) = maybe_real { real @@ -59,16 +62,17 @@ fn add_announcements( T::Currency::make_free_balance_be(&real, BalanceOf::::max_value() / 2u32.into()); Proxy::::add_proxy( RawOrigin::Signed(real.clone()).into(), - caller.clone(), + caller_lookup, T::ProxyType::default(), T::BlockNumber::zero(), )?; real }; + let real_lookup = T::Lookup::unlookup(real); for _ in 0..n { Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&("add_announcement", n)), )?; } @@ -83,8 +87,9 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. 
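+ // (These benchmarks changed because the proxy dispatchables now take an
+ // `AccountIdLookupOf<T>` instead of a bare `T::AccountId`: the benchmark wraps each
+ // account with `T::Lookup::unlookup(..)` and the pallet resolves it back via
+ // `T::Lookup::lookup(..)`; see the matching changes in `frame/proxy/src/lib.rs` below.)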
let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) + }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -95,17 +100,19 @@ benchmarks! { // In this case the caller is the "target" proxy let caller: T::AccountId = account("anonymous", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) + }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -118,14 +125,15 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -136,17 +144,19 @@ benchmarks! { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup, T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -160,10 +170,11 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) + }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) verify { assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); } @@ -171,9 +182,10 @@ benchmarks! { add_proxy { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); + let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); }: _( RawOrigin::Signed(caller.clone()), - account("target", T::MaxProxies::get(), SEED), + real, T::ProxyType::default(), T::BlockNumber::zero() ) @@ -185,9 +197,10 @@ benchmarks! { remove_proxy { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); + let delegate = T::Lookup::unlookup(account("target", 0, SEED)); }: _( RawOrigin::Signed(caller.clone()), - account("target", 0, SEED), + delegate, T::ProxyType::default(), T::BlockNumber::zero() ) @@ -228,6 +241,7 @@ benchmarks! { let p in 0 .. (T::MaxProxies::get() - 2); let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); Pallet::::anonymous( RawOrigin::Signed(whitelisted_caller()).into(), @@ -241,7 +255,7 @@ benchmarks! { add_proxies::(p, Some(anon.clone()))?; ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); - }: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index) + }: _(RawOrigin::Signed(anon.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) verify { assert!(!Proxies::::contains_key(&anon)); } diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 9945626efbeb1..2d8dfc238bcd0 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -45,7 +45,7 @@ use frame_system::{self as system}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{Dispatchable, Hash, Saturating, TrailingZeroInput, Zero}, + traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, DispatchResult, }; use sp_std::prelude::*; @@ -58,6 +58,8 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + /// The parameters under which a particular account has a proxy relationship with some other /// account. #[derive( @@ -197,18 +199,19 @@ pub mod pallet { #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get()) - .saturating_add(di.weight) // AccountData for inner call origin accountdata. 
- .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.weight), di.class) })] pub fn proxy( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, force_proxy_type: Option, call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; ensure!(def.delay.is_zero(), Error::::Unannounced); @@ -233,11 +236,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] pub fn add_proxy( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::add_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -255,11 +259,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::remove_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -359,13 +364,14 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get()))] pub fn kill_anonymous( origin: OriginFor, - spawner: T::AccountId, + spawner: AccountIdLookupOf, proxy_type: T::ProxyType, index: u16, #[pallet::compact] height: T::BlockNumber, #[pallet::compact] ext_index: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; + let spawner = T::Lookup::lookup(spawner)?; let when = (height, ext_index); let proxy = Self::anonymous_account(&spawner, &proxy_type, index, Some(when)); @@ -401,10 +407,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] pub fn announce( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; Proxies::::get(&real) .0 .into_iter() @@ -458,10 +465,11 @@ pub mod pallet { ))] pub fn remove_announcement( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; Ok(()) @@ -489,10 +497,11 @@ pub mod pallet { ))] pub fn reject_announcement( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::edit_announcements(&delegate, |ann| { ann.real != who || ann.call_hash != call_hash })?; @@ -520,19 +529,21 @@ pub mod pallet { #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) - .saturating_add(di.weight) // AccountData for inner call origin accountdata. 
- .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.weight), di.class) })] pub fn proxy_announced( origin: OriginFor, - delegate: T::AccountId, - real: T::AccountId, + delegate: AccountIdLookupOf, + real: AccountIdLookupOf, force_proxy_type: Option, call: Box<::Call>, ) -> DispatchResult { ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index a0807f1d3d0b6..b8d5a55705efa 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -54,7 +54,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; @@ -543,7 +543,7 @@ fn anonymous_works() { let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( ProxyEvent::AnonymousCreated { - anonymous: anon.clone(), + anonymous: anon, who: 1, proxy_type: ProxyType::Any, disambiguation_index: 0, diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 19beaf4d1401b..119df271e0d55 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_proxy. 
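The regenerated weight files in this diff (nomination-pools, preimage, proxy) all follow the same mechanical rewrite: a bare integer cast `as Weight` becomes an explicit `Weight::from_ref_time(..)` construction over `RefTimeWeight`, and multiplying a per-unit weight by a benchmark component moves from `saturating_mul` to `scalar_saturating_mul`. A minimal before/after sketch of the pattern, using a made-up base value rather than a function from this file:

    // Old style: `Weight` is a plain integer type, so literals are cast directly.
    fn example(p: u32) -> Weight {
        (10_000_000 as Weight)
            .saturating_add((50_000 as Weight).saturating_mul(p as Weight))
            .saturating_add(RocksDbWeight::get().reads(1 as Weight))
    }

    // New style: base and per-unit values are wrapped in `Weight::from_ref_time`,
    // and scaling by the component `p` uses `scalar_saturating_mul`.
    fn example(p: u32) -> Weight {
        Weight::from_ref_time(10_000_000 as RefTimeWeight)
            .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight))
            .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
    }
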
@@ -61,97 +61,97 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - (17_768_000 as Weight) + Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - (35_682_000 as Weight) + Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - (25_586_000 as Weight) + Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - (25_794_000 as Weight) + Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - (33_002_000 as Weight) + Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(163_000 as 
RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - (28_166_000 as Weight) + Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - (28_128_000 as Weight) + Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - (24_066_000 as Weight) + Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - (31_077_000 as Weight) + Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight { - (24_657_000 as Weight) + Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) 
} } @@ -159,96 +159,96 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - (17_768_000 as Weight) + Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - (35_682_000 as Weight) + Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - (25_586_000 as Weight) + Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - (25_794_000 as Weight) + Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - (33_002_000 as Weight) + Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + 
.saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - (28_166_000 as Weight) + Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - (28_128_000 as Weight) + Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - (24_066_000 as Weight) + Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - (31_077_000 as Weight) + Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight { - (24_657_000 as Weight) + Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index f709578f6941a..467cae9728fae 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -69,7 +69,7 @@ use safe_mix::TripletMix; use codec::Encode; -use frame_support::traits::Randomness; +use frame_support::{pallet_prelude::Weight, traits::Randomness}; use sp_runtime::traits::{Hash, Saturating}; const RANDOM_MATERIAL_LEN: u32 = 81; @@ -187,7 +187,7 @@ mod tests { parameter_types! { pub BlockWeights: limits::BlockWeights = limits::BlockWeights - ::simple_max(1024); + ::simple_max(Weight::from_ref_time(1024)); pub BlockLength: limits::BlockLength = limits::BlockLength ::max(2 * 1024); } diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs index ab1a5dc283ca5..611a4ce7334a9 100644 --- a/frame/ranked-collective/src/benchmarking.rs +++ b/frame/ranked-collective/src/benchmarking.rs @@ -33,11 +33,15 @@ fn assert_last_event, I: 'static>(generic_event: >:: fn make_member, I: 'static>(rank: Rank) -> T::AccountId { let who = account::("member", MemberCount::::get(0), SEED); - assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), who.clone())); + let who_lookup = T::Lookup::unlookup(who.clone()); + assert_ok!(Pallet::::add_member( + T::PromoteOrigin::successful_origin(), + who_lookup.clone() + )); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - who.clone() + who_lookup.clone() )); } who @@ -46,8 +50,9 @@ fn make_member, I: 'static>(rank: Rank) -> T::AccountId { benchmarks_instance_pallet! { add_member { let who = account::("member", 0, SEED); + let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::add_member { who: who.clone() }; + let call = Call::::add_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(MemberCount::::get(0), 1); @@ -59,10 +64,11 @@ benchmarks_instance_pallet! { let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect::>(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::remove_member { who: who.clone(), min_rank: rank }; + let call = Call::::remove_member { who: who_lookup, min_rank: rank }; }: { call.dispatch_bypass_filter(origin)? } verify { for r in 0..=rank { @@ -76,8 +82,9 @@ benchmarks_instance_pallet! { let r in 0 .. 10; let rank = r as u16; let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::promote_member { who: who.clone() }; + let call = Call::::promote_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).unwrap().rank, rank + 1); @@ -89,10 +96,11 @@ benchmarks_instance_pallet! 
{ let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = IdToIndex::::get(rank, &last).unwrap(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::demote_member { who: who.clone() }; + let call = Call::::demote_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).map(|x| x.rank), rank.checked_sub(1)); @@ -106,14 +114,15 @@ benchmarks_instance_pallet! { vote { let caller: T::AccountId = whitelisted_caller(); - assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller.clone())); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller_lookup.clone())); // Create a poll let class = T::Polls::classes().into_iter().next().unwrap(); let rank = T::MinRankOfClass::convert(class.clone()); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - caller.clone() + caller_lookup.clone() )); } diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 7ea43a9017445..b8eaac9823634 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -43,7 +43,11 @@ use scale_info::TypeInfo; use sp_arithmetic::traits::Saturating; -use sp_runtime::{traits::Convert, ArithmeticError::Overflow, Perbill, RuntimeDebug}; +use sp_runtime::{ + traits::{Convert, StaticLookup}, + ArithmeticError::Overflow, + Perbill, RuntimeDebug, +}; use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ @@ -85,16 +89,16 @@ pub type Votes = u32; Decode, MaxEncodedLen, )] -#[scale_info(skip_type_params(M))] +#[scale_info(skip_type_params(T, I, M))] #[codec(mel_bound())] -pub struct Tally { +pub struct Tally { bare_ayes: MemberIndex, ayes: Votes, nays: Votes, - dummy: PhantomData, + dummy: PhantomData<(T, I, M)>, } -impl Tally { +impl, I: 'static, M: GetMaxVoters> Tally { pub fn from_parts(bare_ayes: MemberIndex, ayes: Votes, nays: Votes) -> Self { Tally { bare_ayes, ayes, nays, dummy: PhantomData } } @@ -107,10 +111,11 @@ impl Tally { // All functions of VoteTally now include the class as a param. -pub type TallyOf = Tally>; +pub type TallyOf = Tally>; pub type PollIndexOf = <>::Polls as Polling>>::Index; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -impl VoteTally for Tally { +impl, I: 'static, M: GetMaxVoters> VoteTally for Tally { fn new(_: Rank) -> Self { Self { bare_ayes: 0, ayes: 0, nays: 0, dummy: PhantomData } } @@ -143,6 +148,20 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { bare_ayes: ayes, ayes, nays, dummy: PhantomData } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(class: Rank, granularity: Perbill) { + if M::get_max_voters(class) == 0 { + let max_voters = granularity.saturating_reciprocal_mul(1u32); + for i in 0..max_voters { + let who: T::AccountId = + frame_benchmarking::account("ranked_collective_benchmarking", i, 0); + crate::Pallet::::do_add_member_to_rank(who, class) + .expect("could not add members for benchmarks"); + } + assert_eq!(M::get_max_voters(class), max_voters); + } + } } /// Record needed for every member. 
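// [A hedged aside on the benchmark-only `VoteTally::setup` hook added above: with a
// granularity of Perbill::from_rational(1, 1000), `granularity.saturating_reciprocal_mul(1u32)`
// comes out at roughly 1000, so the hook seeds about 1000 members at the given rank. That way
// any support/approval threshold expressible at that granularity corresponds to a whole number
// of voters, and `from_requirements` can build a representable tally. The stand-alone sketch
// below mirrors only that arithmetic with plain integers; the names and the round-up rounding
// are illustrative, not sp_arithmetic's exact definitions.]
const BILLION: u64 = 1_000_000_000;
const GRANULARITY_PARTS: u64 = BILLION / 1000; // stands in for Perbill::from_rational(1, 1000)

// Mirrors `granularity.saturating_reciprocal_mul(1u32)`: the reciprocal of 1/1000, times 1.
fn max_voters(granularity_parts: u64) -> u64 {
    (BILLION + granularity_parts - 1) / granularity_parts // divide, rounding up
}

fn main() {
    let n = max_voters(GRANULARITY_PARTS);
    assert_eq!(n, 1000);
    // The real hook would now add `n` members at rank `class`, so a support threshold of,
    // say, 12.3% maps to exactly 123 of the 1000 seeded voters.
    println!("seeding {n} voters");
}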
@@ -241,6 +260,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. The account ID of the @@ -264,6 +296,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. The pair of including both @@ -287,6 +332,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } #[frame_support::pallet] @@ -413,19 +471,10 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::add_member())] - pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; - ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); - let index = MemberCount::::get(0); - let count = index.checked_add(1).ok_or(Overflow)?; - - Members::::insert(&who, MemberRecord { rank: 0 }); - IdToIndex::::insert(0, &who, index); - IndexToId::::insert(0, index, &who); - MemberCount::::insert(0, count); - Self::deposit_event(Event::MemberAdded { who }); - - Ok(()) + let who = T::Lookup::lookup(who)?; + Self::do_add_member(who) } /// Increment the rank of an existing member by one. 
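// [To make the `AccountIdLookupOf` change concrete: the dispatchables in the hunks around
// here now accept `<T::Lookup as StaticLookup>::Source` instead of a bare `T::AccountId`
// and resolve it with `T::Lookup::lookup(..)?` as their first step, while call sites
// (tests, benchmarks) wrap accounts with `T::Lookup::unlookup(..)`. The self-contained
// sketch below uses a toy trait and an identity lookup; in a real runtime the Source is
// typically a MultiAddress, so callers can pass indices or hashes as well as plain account
// ids. All names here are illustrative, not sp_runtime's actual definitions.]
trait StaticLookup {
    type Source; // the "address" as it appears in the call
    type Target; // the resolved account id
    fn lookup(s: Self::Source) -> Result<Self::Target, &'static str>;
    fn unlookup(t: Self::Target) -> Self::Source;
}

struct IdentityLookup;

impl StaticLookup for IdentityLookup {
    type Source = u64;
    type Target = u64;
    fn lookup(s: u64) -> Result<u64, &'static str> { Ok(s) }
    fn unlookup(t: u64) -> u64 { t }
}

// Old shape: fn add_member(who: AccountId). New shape, as in the diff:
fn add_member<L: StaticLookup>(who: L::Source) -> Result<(), &'static str> {
    let _who: L::Target = L::lookup(who)?; // resolve the address before any state change
    Ok(())
}

fn main() {
    let account: u64 = 42;
    // Call sites now wrap the account with `unlookup`:
    add_member::<IdentityLookup>(IdentityLookup::unlookup(account)).unwrap();
    println!("ok");
}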
@@ -435,19 +484,10 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::promote_member(0))] - pub fn promote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn promote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; - let record = Self::ensure_member(&who)?; - let rank = record.rank.checked_add(1).ok_or(Overflow)?; - ensure!(max_rank >= rank, Error::::NoPermission); - let index = MemberCount::::get(rank); - MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); - IdToIndex::::insert(rank, &who, index); - IndexToId::::insert(rank, index, &who); - Members::::insert(&who, MemberRecord { rank }); - Self::deposit_event(Event::RankChanged { who, rank }); - - Ok(()) + let who = T::Lookup::lookup(who)?; + Self::do_promote_member(who, Some(max_rank)) } /// Decrement the rank of an existing member by one. If the member is already at rank zero, @@ -458,8 +498,9 @@ pub mod pallet { /// /// Weight: `O(1)`, less if the member's index is highest in its rank. #[pallet::weight(T::WeightInfo::demote_member(0))] - pub fn demote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn demote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let mut record = Self::ensure_member(&who)?; let rank = record.rank; ensure!(max_rank >= rank, Error::::NoPermission); @@ -490,10 +531,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_member(*min_rank as u32))] pub fn remove_member( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, min_rank: Rank, ) -> DispatchResultWithPostInfo { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let MemberRecord { rank, .. } = Self::ensure_member(&who)?; ensure!(min_rank >= rank, Error::::InvalidWitness); ensure!(max_rank >= rank, Error::::NoPermission); @@ -626,5 +668,53 @@ pub mod pallet { MemberCount::::mutate(rank, |r| r.saturating_dec()); Ok(()) } + + /// Adds a member into the ranked collective at level 0. + /// + /// No origin checks are executed. + pub fn do_add_member(who: T::AccountId) -> DispatchResult { + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + let index = MemberCount::::get(0); + let count = index.checked_add(1).ok_or(Overflow)?; + + Members::::insert(&who, MemberRecord { rank: 0 }); + IdToIndex::::insert(0, &who, index); + IndexToId::::insert(0, index, &who); + MemberCount::::insert(0, count); + Self::deposit_event(Event::MemberAdded { who }); + Ok(()) + } + + /// Promotes a member in the ranked collective into the next rank. + /// + /// A `maybe_max_rank` may be provided to check that the member does not get promoted beyond + /// a certain rank. If `None` is provided, then the rank will be incremented without checks.
+ pub fn do_promote_member( + who: T::AccountId, + maybe_max_rank: Option, + ) -> DispatchResult { + let record = Self::ensure_member(&who)?; + let rank = record.rank.checked_add(1).ok_or(Overflow)?; + if let Some(max_rank) = maybe_max_rank { + ensure!(max_rank >= rank, Error::::NoPermission); + } + let index = MemberCount::::get(rank); + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); + IdToIndex::::insert(rank, &who, index); + IndexToId::::insert(rank, index, &who); + Members::::insert(&who, MemberRecord { rank }); + Self::deposit_event(Event::RankChanged { who, rank }); + Ok(()) + } + + /// Add a member to the ranked collective, and continue to promote them until a certain rank + /// is reached. + pub fn do_add_member_to_rank(who: T::AccountId, rank: Rank) -> DispatchResult { + Self::do_add_member(who.clone())?; + for _ in 0..rank { + Self::do_promote_member(who.clone(), None)?; + } + Ok(()) + } } } diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 4344a1be730fb..b4173b30b0c2e 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -22,6 +22,7 @@ use std::collections::BTreeMap; use frame_support::{ assert_noop, assert_ok, error::BadOrigin, + pallet_prelude::Weight, parameter_types, traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, }; @@ -50,7 +51,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = Everything; @@ -455,3 +456,20 @@ fn ensure_ranked_works() { assert_eq!(Rank4::try_origin(Origin::signed(3)).unwrap_err().as_signed().unwrap(), 3); }); } + +#[test] +fn do_add_member_to_rank_works() { + new_test_ext().execute_with(|| { + let max_rank = 9u16; + assert_ok!(Club::do_add_member_to_rank(69, max_rank / 2)); + assert_ok!(Club::do_add_member_to_rank(1337, max_rank)); + for i in 0..=max_rank { + if i <= max_rank / 2 { + assert_eq!(member_count(i), 2); + } else { + assert_eq!(member_count(i), 1); + } + } + assert_eq!(member_count(max_rank + 1), 0); + }) +} diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 3048dd804a5e2..a0309daea2263 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_ranked_collective.
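// [The weights.rs hunks that follow all apply one mechanical rewrite: a bare
// `(N as Weight)` literal becomes `Weight::from_ref_time(N as RefTimeWeight)`, and
// weight-times-scalar products go through `scalar_saturating_mul` instead of
// `saturating_mul`, since `Weight` is no longer a plain integer. A stand-alone sketch
// with deliberately simplified types, not frame_support's real definitions:]
type RefTimeWeight = u64;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight { ref_time: RefTimeWeight }

impl Weight {
    const fn from_ref_time(ref_time: RefTimeWeight) -> Self { Self { ref_time } }
    fn saturating_add(self, rhs: Self) -> Self {
        Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) }
    }
    fn scalar_saturating_mul(self, scalar: RefTimeWeight) -> Self {
        Self { ref_time: self.ref_time.saturating_mul(scalar) }
    }
}

// Old: (11_936_000 as Weight).saturating_add((9_000 as Weight).saturating_mul(r as Weight))
// New shape, matching `promote_member` in the hunks below:
fn promote_member_weight(r: u32) -> Weight {
    Weight::from_ref_time(11_936_000)
        .saturating_add(Weight::from_ref_time(9_000).scalar_saturating_mul(r as RefTimeWeight))
}

fn main() {
    assert_eq!(promote_member_weight(4).ref_time, 11_936_000 + 9_000 * 4);
    println!("ok");
}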
@@ -61,62 +61,62 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - (11_000_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(11_000_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - (16_855_000 as Weight) + Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - (11_936_000 as Weight) + Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - (17_582_000 as Weight) + Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - (22_000_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(22_000_000 as 
RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - (6_188_000 as Weight) + Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } } @@ -127,61 +127,61 @@ impl WeightInfo for () { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - (11_000_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(11_000_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - (16_855_000 as Weight) + Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - (11_936_000 as Weight) + Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - (17_582_000 as 
Weight) + Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - (22_000_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(22_000_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - (6_188_000 as Weight) + Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } } diff --git a/frame/recovery/src/benchmarking.rs b/frame/recovery/src/benchmarking.rs index 5354de6d10b51..56d5df22d49c5 100644 --- a/frame/recovery/src/benchmarking.rs +++ b/frame/recovery/src/benchmarking.rs @@ -106,22 +106,25 @@ benchmarks! { as_recovered { let caller: T::AccountId = whitelisted_caller(); let recovered_account: T::AccountId = account("recovered_account", 0, SEED); + let recovered_account_lookup = T::Lookup::unlookup(recovered_account.clone()); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::insert(&caller, &recovered_account); }: _( RawOrigin::Signed(caller), - recovered_account, + recovered_account_lookup, Box::new(call) ) set_recovered { let lost: T::AccountId = whitelisted_caller(); + let lost_lookup = T::Lookup::unlookup(lost.clone()); let rescuer: T::AccountId = whitelisted_caller(); + let rescuer_lookup = T::Lookup::unlookup(rescuer.clone()); }: _( RawOrigin::Root, - lost.clone(), - rescuer.clone() + lost_lookup, + rescuer_lookup ) verify { assert_last_event::( Event::AccountRecovered { @@ -153,11 +156,12 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); insert_recovery_account::(&caller, &lost_account); }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone() + lost_account_lookup ) verify { assert_last_event::( Event::RecoveryInitiated { @@ -172,7 +176,10 @@ benchmarks! 
{ let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); + let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); + // Create friends let friends = add_caller_and_generate_friends::(caller.clone(), n); @@ -206,8 +213,8 @@ benchmarks! { }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone(), - rescuer_account.clone() + lost_account_lookup, + rescuer_account_lookup ) verify { assert_last_event::( Event::RecoveryVouched { @@ -223,6 +230,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -257,7 +265,7 @@ benchmarks! { >::insert(&lost_account, &caller, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone() + lost_account_lookup ) verify { assert_last_event::( Event::AccountRecovered { @@ -270,6 +278,7 @@ benchmarks! { close_recovery { let caller: T::AccountId = whitelisted_caller(); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); + let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); let n in 1 .. T::MaxFriends::get(); @@ -307,7 +316,7 @@ benchmarks! { >::insert(&caller, &rescuer_account, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - rescuer_account.clone() + rescuer_account_lookup ) verify { assert_last_event::( Event::RecoveryClosed { @@ -356,6 +365,7 @@ benchmarks! { cancel_recovered { let caller: T::AccountId = whitelisted_caller(); let account: T::AccountId = account("account", 0, SEED); + let account_lookup = T::Lookup::unlookup(account.clone()); frame_system::Pallet::::inc_providers(&caller); @@ -364,7 +374,7 @@ benchmarks! { Proxy::::insert(&caller, &account); }: _( RawOrigin::Signed(caller), - account + account_lookup ) impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index e844ce91aed00..79eaad6bed566 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -156,7 +156,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}; +use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup}; use sp_std::prelude::*; use frame_support::{ @@ -182,6 +182,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type FriendsOf = BoundedVec<::AccountId, ::MaxFriends>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An active recovery process. 
#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -382,10 +383,11 @@ pub mod pallet { )})] pub fn as_recovered( origin: OriginFor, - account: T::AccountId, + account: AccountIdLookupOf, call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(target == account, Error::::NotAllowed); @@ -405,10 +407,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_recovered())] pub fn set_recovered( origin: OriginFor, - lost: T::AccountId, - rescuer: T::AccountId, + lost: AccountIdLookupOf, + rescuer: AccountIdLookupOf, ) -> DispatchResult { ensure_root(origin)?; + let lost = T::Lookup::lookup(lost)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Create the recovery storage item. >::insert(&rescuer, &lost); Self::deposit_event(Event::::AccountRecovered { @@ -486,8 +490,12 @@ pub mod pallet { /// - `account`: The lost account that you want to recover. This account needs to be /// recoverable (i.e. have a recovery configuration). #[pallet::weight(T::WeightInfo::initiate_recovery())] - pub fn initiate_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn initiate_recovery( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); // Check that the recovery process has not already been started @@ -528,10 +536,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vouch_recovery(T::MaxFriends::get()))] pub fn vouch_recovery( origin: OriginFor, - lost: T::AccountId, - rescuer: T::AccountId, + lost: AccountIdLookupOf, + rescuer: AccountIdLookupOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let lost = T::Lookup::lookup(lost)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Get the recovery configuration for the lost account. let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer. @@ -567,8 +577,12 @@ pub mod pallet { /// - `account`: The lost account that you want to claim has been successfully recovered by /// you. #[pallet::weight(T::WeightInfo::claim_recovery(T::MaxFriends::get()))] - pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn claim_recovery( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Get the recovery configuration for the lost account let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; @@ -610,8 +624,12 @@ pub mod pallet { /// Parameters: /// - `rescuer`: The account trying to rescue this recoverable account. #[pallet::weight(T::WeightInfo::close_recovery(T::MaxFriends::get()))] - pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { + pub fn close_recovery( + origin: OriginFor, + rescuer: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Take the active recovery process started by the rescuer for this account. 
let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; @@ -665,8 +683,12 @@ pub mod pallet { /// Parameters: /// - `account`: The recovered account you are able to call on-behalf-of. #[pallet::weight(T::WeightInfo::cancel_recovered())] - pub fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn cancel_recovered( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 44fc4d72a4a5f..5dc49fff09b32 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -47,7 +47,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 0887180a533fc..8b82454d5849d 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_recovery. @@ -60,71 +60,71 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - (6_579_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(6_579_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - (13_402_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_402_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - (28_217_000 as Weight) + Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - (34_082_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_082_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - (22_038_000 as Weight) + 
Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - (28_621_000 as Weight) + Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - (33_287_000 as Weight) + Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - (31_964_000 as Weight) + Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - (12_702_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_702_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -132,70 +132,70 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - (6_579_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(6_579_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - (13_402_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_402_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery 
Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - (28_217_000 as Weight) + Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - (34_082_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_082_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - (22_038_000 as Weight) + Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - (28_621_000 as Weight) + Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - (33_287_000 as Weight) + Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - (31_964_000 as Weight) + Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(222_000 as 
RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - (12_702_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_702_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 571c7675f5cd7..3c7160ac9e73f 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -20,9 +20,10 @@ use super::*; use crate::Pallet as Referenda; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks, whitelist_account}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ assert_ok, + dispatch::UnfilteredDispatchable, traits::{Bounded, Currency, EnsureOrigin}, }; use frame_system::RawOrigin; @@ -31,13 +32,13 @@ use sp_runtime::traits::Bounded as ArithBounded; const SEED: u32 = 0; #[allow(dead_code)] -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } @@ -47,246 +48,265 @@ fn dummy_call() -> Bounded<::Call> { T::Preimages::bound(call).unwrap() } -fn create_referendum() -> (T::AccountId, ReferendumIndex) { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - assert_ok!(Referenda::::submit( - RawOrigin::Signed(caller.clone()).into(), - Box::new(RawOrigin::Root.into()), - dummy_call::(), - DispatchTime::After(0u32.into()) - )); - let index = ReferendumCount::::get() - 1; - (caller, index) +fn create_referendum, I: 'static>() -> (T::Origin, ReferendumIndex) { + let origin: T::Origin = T::SubmitOrigin::successful_origin(); + if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + whitelist_account!(caller); + } + + let proposal_origin = Box::new(RawOrigin::Root.into()); + let proposal_hash = dummy_call::(); + let enactment_moment = DispatchTime::After(0u32.into()); + let call = Call::::submit { proposal_origin, proposal_hash, enactment_moment }; + assert_ok!(call.dispatch_bypass_filter(origin.clone())); + let index = ReferendumCount::::get() - 1; + (origin, index) } -fn place_deposit(index: ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn place_deposit, I: 'static>(index: ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); + assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); } -fn nudge(index: ReferendumIndex) { - assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); +fn nudge, I: 'static>(index: 
ReferendumIndex) { + assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); } -fn fill_queue( +fn fill_queue, I: 'static>( index: ReferendumIndex, spaces: u32, pass_after: u32, ) -> Vec { // First, create enough other referendums to fill the track. let mut others = vec![]; - for _ in 0..info::(index).max_deciding { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + for _ in 0..info::(index).max_deciding { + let (_origin, index) = create_referendum::(); + place_deposit::(index); others.push(index); } // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing_after::(index, Perbill::from_percent(pass_after)); + let (_origin, index) = create_referendum::(); + place_deposit::(index); + make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); } // Skip to when they can start being decided. - skip_prepare_period::(index); + skip_prepare_period::(index); // Manually nudge the other referenda first to ensure that they begin. - others.iter().for_each(|&i| nudge::(i)); + others.iter().for_each(|&i| nudge::(i)); others } -fn info(index: ReferendumIndex) -> &'static TrackInfoOf { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn info, I: 'static>(index: ReferendumIndex) -> &'static TrackInfoOf { + let status = Referenda::::ensure_ongoing(index).unwrap(); T::Tracks::info(status.track).expect("Id value returned from T::Tracks") } -fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { - let support = info::(index).min_support.threshold(period_portion); - let approval = info::(index).min_approval.threshold(period_portion); - Referenda::::access_poll(index, |status| { +fn make_passing_after, I: 'static>(index: ReferendumIndex, period_portion: Perbill) { + // We add an extra 1 percent to handle any perbill rounding errors which may cause + // a proposal to not actually pass. 
+ let support = info::(index) + .min_support + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); + let approval = info::(index) + .min_approval + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::from_requirements(support, approval, class); } }); } -fn make_passing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_passing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::unanimity(class); } }); } -fn make_failing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_failing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::rejection(class); } }); } -fn skip_prepare_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let prepare_period_over = status.submitted + info::(index).prepare_period; +fn skip_prepare_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let prepare_period_over = status.submitted + info::(index).prepare_period; frame_system::Pallet::::set_block_number(prepare_period_over); } -fn skip_decision_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; +fn skip_decision_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; frame_system::Pallet::::set_block_number(decision_period_over); } -fn skip_confirm_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_confirm_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let confirm_period_over = status.deciding.unwrap().confirming.unwrap(); frame_system::Pallet::::set_block_number(confirm_period_over); } -fn skip_timeout_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_timeout_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let timeout_period_over = status.submitted + T::UndecidingTimeout::get(); frame_system::Pallet::::set_block_number(timeout_period_over); } -fn alarm_time(index: ReferendumIndex) -> T::BlockNumber { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn alarm_time, I: 'static>(index: ReferendumIndex) -> T::BlockNumber { + let status = Referenda::::ensure_ongoing(index).unwrap(); status.alarm.unwrap().0 } -fn is_confirming(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_confirming, I: 'static>(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: Some(_), .. }), .. 
-fn make_passing<T: Config>(index: ReferendumIndex) {
-	Referenda::<T>::access_poll(index, |status| {
+fn make_passing<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	Referenda::<T, I>::access_poll(index, |status| {
 		if let PollStatus::Ongoing(tally, class) = status {
+			T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32));
 			*tally = T::Tally::unanimity(class);
 		}
 	});
 }
 
-fn make_failing<T: Config>(index: ReferendumIndex) {
-	Referenda::<T>::access_poll(index, |status| {
+fn make_failing<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	Referenda::<T, I>::access_poll(index, |status| {
 		if let PollStatus::Ongoing(tally, class) = status {
+			T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32));
 			*tally = T::Tally::rejection(class);
 		}
 	});
 }
 
-fn skip_prepare_period<T: Config>(index: ReferendumIndex) {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
-	let prepare_period_over = status.submitted + info::<T>(index).prepare_period;
+fn skip_prepare_period<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
+	let prepare_period_over = status.submitted + info::<T, I>(index).prepare_period;
 	frame_system::Pallet::<T>::set_block_number(prepare_period_over);
 }
 
-fn skip_decision_period<T: Config>(index: ReferendumIndex) {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
-	let decision_period_over = status.deciding.unwrap().since + info::<T>(index).decision_period;
+fn skip_decision_period<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
+	let decision_period_over = status.deciding.unwrap().since + info::<T, I>(index).decision_period;
 	frame_system::Pallet::<T>::set_block_number(decision_period_over);
 }
 
-fn skip_confirm_period<T: Config>(index: ReferendumIndex) {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+fn skip_confirm_period<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	let confirm_period_over = status.deciding.unwrap().confirming.unwrap();
 	frame_system::Pallet::<T>::set_block_number(confirm_period_over);
 }
 
-fn skip_timeout_period<T: Config>(index: ReferendumIndex) {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+fn skip_timeout_period<T: Config<I>, I: 'static>(index: ReferendumIndex) {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	let timeout_period_over = status.submitted + T::UndecidingTimeout::get();
 	frame_system::Pallet::<T>::set_block_number(timeout_period_over);
 }
 
-fn alarm_time<T: Config>(index: ReferendumIndex) -> T::BlockNumber {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+fn alarm_time<T: Config<I>, I: 'static>(index: ReferendumIndex) -> T::BlockNumber {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	status.alarm.unwrap().0
 }
 
-fn is_confirming<T: Config>(index: ReferendumIndex) -> bool {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+fn is_confirming<T: Config<I>, I: 'static>(index: ReferendumIndex) -> bool {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	matches!(
 		status,
 		ReferendumStatus { deciding: Some(DecidingStatus { confirming: Some(_), .. }), .. }
 	)
 }
 
-fn is_not_confirming<T: Config>(index: ReferendumIndex) -> bool {
-	let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+fn is_not_confirming<T: Config<I>, I: 'static>(index: ReferendumIndex) -> bool {
+	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	matches!(
 		status,
 		ReferendumStatus { deciding: Some(DecidingStatus { confirming: None, .. }), .. }
 	)
 }
 
-benchmarks! {
+benchmarks_instance_pallet! {
 	submit {
-		let caller = funded_account::<T>("caller", 0);
-		whitelist_account!(caller);
+		let origin: T::Origin = T::SubmitOrigin::successful_origin();
+		if let Ok(caller) = frame_system::ensure_signed(origin.clone()) {
+			T::Currency::make_free_balance_be(&caller, BalanceOf::<T, I>::max_value());
+			whitelist_account!(caller);
+		}
 		let call = dummy_call::<T, I>();
-	}: _(
-		RawOrigin::Signed(caller),
+	}: _<T::Origin>(
+		origin,
 		Box::new(RawOrigin::Root.into()),
 		call,
 		DispatchTime::After(0u32.into())
 	)
 	verify {
-		let index = ReferendumCount::<T>::get().checked_sub(1).unwrap();
-		assert_matches!(ReferendumInfoFor::<T>::get(index), Some(ReferendumInfo::Ongoing(_)));
+		let index = ReferendumCount::<T, I>::get().checked_sub(1).unwrap();
+		assert_matches!(ReferendumInfoFor::<T, I>::get(index), Some(ReferendumInfo::Ongoing(_)));
 	}
 
 	place_decision_deposit_preparing {
-		let (caller, index) = create_referendum::<T>();
-	}: place_decision_deposit(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+	}: place_decision_deposit<T::Origin>(origin, index)
 	verify {
-		assert!(Referenda::<T>::ensure_ongoing(index).unwrap().decision_deposit.is_some());
+		assert!(Referenda::<T, I>::ensure_ongoing(index).unwrap().decision_deposit.is_some());
 	}
 
 	place_decision_deposit_queued {
-		let (caller, index) = create_referendum::<T>();
-		fill_queue::<T>(index, 1, 90);
-	}: place_decision_deposit(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+		fill_queue::<T, I>(index, 1, 90);
+	}: place_decision_deposit<T::Origin>(origin, index)
 	verify {
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert_eq!(TrackQueue::<T>::get(&track)[0], (index, 0u32.into()));
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert!(TrackQueue::<T, I>::get(&track).contains(&(index, 0u32.into())));
 	}
 
 	place_decision_deposit_not_queued {
-		let (caller, index) = create_referendum::<T>();
-		fill_queue::<T>(index, 0, 90);
-	}: place_decision_deposit(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+		fill_queue::<T, I>(index, 0, 90);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(i, _)| i != index));
+	}: place_decision_deposit<T::Origin>(origin, index)
 	verify {
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert!(TrackQueue::<T>::get(&track).into_iter().all(|(i, _)| i != index));
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(i, _)| i != index));
 	}
 
 	place_decision_deposit_passing {
-		let (caller, index) = create_referendum::<T>();
-		skip_prepare_period::<T>(index);
-		make_passing::<T>(index);
-	}: place_decision_deposit(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+		skip_prepare_period::<T, I>(index);
+		make_passing::<T, I>(index);
+	}: place_decision_deposit<T::Origin>(origin, index)
 	verify {
-		assert!(is_confirming::<T>(index));
+		assert!(is_confirming::<T, I>(index));
 	}
 
 	place_decision_deposit_failing {
-		let (caller, index) = create_referendum::<T>();
-		skip_prepare_period::<T>(index);
-	}: place_decision_deposit(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+		skip_prepare_period::<T, I>(index);
+	}: place_decision_deposit<T::Origin>(origin, index)
 	verify {
-		assert!(is_not_confirming::<T>(index));
+		assert!(is_not_confirming::<T, I>(index));
 	}
 
 	refund_decision_deposit {
-		let (caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		assert_ok!(Referenda::<T>::cancel(T::CancelOrigin::successful_origin(), index));
-	}: _(RawOrigin::Signed(caller), index)
+		let (origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		assert_ok!(Referenda::<T, I>::cancel(T::CancelOrigin::successful_origin(), index));
+	}: _<T::Origin>(origin, index)
 	verify {
-		assert_matches!(ReferendumInfoFor::<T>::get(index), Some(ReferendumInfo::Cancelled(_, _, None)));
+		assert_matches!(ReferendumInfoFor::<T, I>::get(index), Some(ReferendumInfo::Cancelled(_, _, None)));
 	}
 
 	cancel {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
 	}: _<T::Origin>(T::CancelOrigin::successful_origin(), index)
 	verify {
-		assert_matches!(ReferendumInfoFor::<T>::get(index), Some(ReferendumInfo::Cancelled(..)));
+		assert_matches!(ReferendumInfoFor::<T, I>::get(index), Some(ReferendumInfo::Cancelled(..)));
 	}
 
 	kill {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
 	}: _<T::Origin>(T::KillOrigin::successful_origin(), index)
 	verify {
-		assert_matches!(ReferendumInfoFor::<T>::get(index), Some(ReferendumInfo::Killed(..)));
+		assert_matches!(ReferendumInfoFor::<T, I>::get(index), Some(ReferendumInfo::Killed(..)));
 	}
 
 	one_fewer_deciding_queue_empty {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		nudge::<T>(index);
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_ok!(Referenda::<T>::cancel(T::CancelOrigin::successful_origin(), index));
-		assert_eq!(DecidingCount::<T>::get(&track), 1);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		nudge::<T, I>(index);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_ok!(Referenda::<T, I>::cancel(T::CancelOrigin::successful_origin(), index));
+		assert_eq!(DecidingCount::<T, I>::get(&track), 1);
 	}: one_fewer_deciding(RawOrigin::Root, track)
 	verify {
-		assert_eq!(DecidingCount::<T>::get(&track), 0);
+		assert_eq!(DecidingCount::<T, I>::get(&track), 0);
 	}
 
 	one_fewer_deciding_failing {
-		let (_caller, index) = create_referendum::<T>();
+		let (_origin, index) = create_referendum::<T, I>();
 		// No spaces free in the queue.
-		let queued = fill_queue::<T>(index, 0, 90);
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_ok!(Referenda::<T>::cancel(T::CancelOrigin::successful_origin(), queued[0]));
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		let deciding_count = DecidingCount::<T>::get(&track);
+		let queued = fill_queue::<T, I>(index, 0, 90);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_ok!(Referenda::<T, I>::cancel(T::CancelOrigin::successful_origin(), queued[0]));
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		let deciding_count = DecidingCount::<T, I>::get(&track);
 	}: one_fewer_deciding(RawOrigin::Root, track)
 	verify {
-		assert_eq!(DecidingCount::<T>::get(&track), deciding_count);
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get() - 1);
-		assert!(queued.into_iter().skip(1).all(|i| Referenda::<T>::ensure_ongoing(i)
+		assert_eq!(DecidingCount::<T, I>::get(&track), deciding_count);
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get() - 1);
+		assert!(queued.into_iter().skip(1).all(|i| Referenda::<T, I>::ensure_ongoing(i)
 			.unwrap()
 			.deciding
 			.map_or(true, |d| d.confirming.is_none())
@@ -294,63 +314,63 @@ benchmarks! {
 	}
 
 	one_fewer_deciding_passing {
-		let (_caller, index) = create_referendum::<T>();
+		let (_origin, index) = create_referendum::<T, I>();
 		// No spaces free in the queue.
-		let queued = fill_queue::<T>(index, 0, 0);
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_ok!(Referenda::<T>::cancel(T::CancelOrigin::successful_origin(), queued[0]));
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		let deciding_count = DecidingCount::<T>::get(&track);
+		let queued = fill_queue::<T, I>(index, 0, 0);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_ok!(Referenda::<T, I>::cancel(T::CancelOrigin::successful_origin(), queued[0]));
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		let deciding_count = DecidingCount::<T, I>::get(&track);
 	}: one_fewer_deciding(RawOrigin::Root, track)
 	verify {
-		assert_eq!(DecidingCount::<T>::get(&track), deciding_count);
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get() - 1);
-		assert!(queued.into_iter().skip(1).all(|i| Referenda::<T>::ensure_ongoing(i)
+		assert_eq!(DecidingCount::<T, I>::get(&track), deciding_count);
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get() - 1);
+		assert!(queued.into_iter().skip(1).filter(|i| Referenda::<T, I>::ensure_ongoing(*i)
 			.unwrap()
 			.deciding
-			.map_or(true, |d| d.confirming.is_some())
-		));
+			.map_or(false, |d| d.confirming.is_some())
+		).count() == 1);
 	}
 
 	nudge_referendum_requeued_insertion {
 		// First create our referendum and place the deposit. It will be failing.
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		fill_queue::<T>(index, 0, 90);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		fill_queue::<T, I>(index, 0, 90);
 
 		// Now nudge ours, with the track now full and the queue full of referenda with votes,
 		// ours will not be in the queue.
-		nudge::<T>(index);
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert!(TrackQueue::<T>::get(&track).into_iter().all(|(i, _)| i != index));
+		nudge::<T, I>(index);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(i, _)| i != index));
 
 		// Now alter the voting, so that ours goes into pole-position and shifts others down.
-		make_passing::<T>(index);
+		make_passing::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let t = TrackQueue::<T>::get(&track);
+		let t = TrackQueue::<T, I>::get(&track);
 		assert_eq!(t.len() as u32, T::MaxQueued::get());
 		assert_eq!(t[t.len() - 1].0, index);
 	}
 
 	nudge_referendum_requeued_slide {
 		// First create our referendum and place the deposit. It will be failing.
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		fill_queue::<T>(index, 1, 90);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		fill_queue::<T, I>(index, 1, 90);
 
 		// Now nudge ours, with the track now full, ours will be queued, but with no votes, it
 		// will have the worst position.
-		nudge::<T>(index);
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert_eq!(TrackQueue::<T>::get(&track)[0], (index, 0u32.into()));
+		nudge::<T, I>(index);
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert_eq!(TrackQueue::<T, I>::get(&track)[0], (index, 0u32.into()));
 
 		// Now alter the voting, so that ours leap-frogs all into the best position.
-		make_passing::<T>(index);
+		make_passing::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let t = TrackQueue::<T>::get(&track);
+		let t = TrackQueue::<T, I>::get(&track);
 		assert_eq!(t.len() as u32, T::MaxQueued::get());
 		assert_eq!(t[t.len() - 1].0, index);
 	}
 
@@ -361,159 +381,159 @@ benchmarks! {
 		// insertion at the beginning.
 
 		// First create our referendum and place the deposit. It will be failing.
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		fill_queue::<T>(index, 1, 0);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		fill_queue::<T, I>(index, 1, 0);
 
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get() - 1);
-		assert!(TrackQueue::<T>::get(&track).into_iter().all(|(_, v)| v > 0u32.into()));
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get() - 1);
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(_, v)| v > 0u32.into()));
 
 		// Then nudge ours, with the track now full, ours will be queued.
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert_eq!(TrackQueue::<T>::get(&track)[0], (index, 0u32.into()));
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert_eq!(TrackQueue::<T, I>::get(&track)[0], (index, 0u32.into()));
 	}
 
 	nudge_referendum_not_queued {
 		// First create our referendum and place the deposit. It will be failing.
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		fill_queue::<T>(index, 0, 0);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		fill_queue::<T, I>(index, 0, 0);
 
-		let track = Referenda::<T>::ensure_ongoing(index).unwrap().track;
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert!(TrackQueue::<T>::get(&track).into_iter().all(|(_, v)| v > 0u32.into()));
+		let track = Referenda::<T, I>::ensure_ongoing(index).unwrap().track;
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(_, v)| v > 0u32.into()));
 
 		// Then nudge ours, with the track now full, ours will be queued.
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert_eq!(TrackQueue::<T>::get(&track).len() as u32, T::MaxQueued::get());
-		assert!(TrackQueue::<T>::get(&track).into_iter().all(|(i, _)| i != index));
+		assert_eq!(TrackQueue::<T, I>::get(&track).len() as u32, T::MaxQueued::get());
+		assert!(TrackQueue::<T, I>::get(&track).into_iter().all(|(i, _)| i != index));
 	}
 
 	nudge_referendum_no_deposit {
-		let (_caller, index) = create_referendum::<T>();
-		skip_prepare_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		skip_prepare_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+		let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 		assert_matches!(status, ReferendumStatus { deciding: None, .. });
 	}
 
 	nudge_referendum_preparing {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let status = Referenda::<T>::ensure_ongoing(index).unwrap();
+		let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 		assert_matches!(status, ReferendumStatus { deciding: None, .. });
 	}
 
 	nudge_referendum_timed_out {
-		let (_caller, index) = create_referendum::<T>();
-		skip_timeout_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		skip_timeout_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let info = ReferendumInfoFor::<T>::get(index).unwrap();
+		let info = ReferendumInfoFor::<T, I>::get(index).unwrap();
 		assert_matches!(info, ReferendumInfo::TimedOut(..));
 	}
 
 	nudge_referendum_begin_deciding_failing {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert!(is_not_confirming::<T>(index));
+		assert!(is_not_confirming::<T, I>(index));
 	}
 
 	nudge_referendum_begin_deciding_passing {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		make_passing::<T>(index);
-		skip_prepare_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		make_passing::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert!(is_confirming::<T>(index));
+		assert!(is_confirming::<T, I>(index));
 	}
 
 	nudge_referendum_begin_confirming {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		nudge::<T>(index);
-		assert!(!is_confirming::<T>(index));
-		make_passing::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		nudge::<T, I>(index);
+		assert!(!is_confirming::<T, I>(index));
+		make_passing::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert!(is_confirming::<T>(index));
+		assert!(is_confirming::<T, I>(index));
 	}
 
 	nudge_referendum_end_confirming {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		make_passing::<T>(index);
-		nudge::<T>(index);
-		assert!(is_confirming::<T>(index));
-		make_failing::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		make_passing::<T, I>(index);
+		nudge::<T, I>(index);
+		assert!(is_confirming::<T, I>(index));
+		make_failing::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert!(!is_confirming::<T>(index));
+		assert!(!is_confirming::<T, I>(index));
 	}
 	nudge_referendum_continue_not_confirming {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		nudge::<T>(index);
-		assert!(!is_confirming::<T>(index));
-		let old_alarm = alarm_time::<T>(index);
-		make_passing_after::<T>(index, Perbill::from_percent(50));
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		nudge::<T, I>(index);
+		assert!(!is_confirming::<T, I>(index));
+		let old_alarm = alarm_time::<T, I>(index);
+		make_passing_after::<T, I>(index, Perbill::from_percent(50));
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert_ne!(old_alarm, alarm_time::<T>(index));
-		assert!(!is_confirming::<T>(index));
+		assert_ne!(old_alarm, alarm_time::<T, I>(index));
+		assert!(!is_confirming::<T, I>(index));
 	}
 
 	nudge_referendum_continue_confirming {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		make_passing::<T>(index);
-		skip_prepare_period::<T>(index);
-		nudge::<T>(index);
-		assert!(is_confirming::<T>(index));
-		let old_alarm = alarm_time::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		make_passing::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		nudge::<T, I>(index);
+		assert!(is_confirming::<T, I>(index));
+		let old_alarm = alarm_time::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		assert!(is_confirming::<T>(index));
+		assert!(is_confirming::<T, I>(index));
 	}
 
 	nudge_referendum_approved {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		make_passing::<T>(index);
-		nudge::<T>(index);
-		skip_confirm_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		make_passing::<T, I>(index);
+		nudge::<T, I>(index);
+		skip_confirm_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let info = ReferendumInfoFor::<T>::get(index).unwrap();
+		let info = ReferendumInfoFor::<T, I>::get(index).unwrap();
 		assert_matches!(info, ReferendumInfo::Approved(..));
 	}
 
 	nudge_referendum_rejected {
-		let (_caller, index) = create_referendum::<T>();
-		place_deposit::<T>(index);
-		skip_prepare_period::<T>(index);
-		make_failing::<T>(index);
-		nudge::<T>(index);
-		skip_decision_period::<T>(index);
+		let (_origin, index) = create_referendum::<T, I>();
+		place_deposit::<T, I>(index);
+		skip_prepare_period::<T, I>(index);
+		make_failing::<T, I>(index);
+		nudge::<T, I>(index);
+		skip_decision_period::<T, I>(index);
 	}: nudge_referendum(RawOrigin::Root, index)
 	verify {
-		let info = ReferendumInfoFor::<T>::get(index).unwrap();
+		let info = ReferendumInfoFor::<T, I>::get(index).unwrap();
 		assert_matches!(info, ReferendumInfo::Rejected(..));
 	}
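The benchmark suite above is rewritten around two ideas: `benchmarks_instance_pallet!` so one set of benchmarks serves every instance of the pallet, and `EnsureOrigin::successful_origin()` so extrinsics are driven by whatever origin the runtime configures instead of a hard-coded signed account. A toy, dependency-free model of the origin half (all names invented for illustration; the real trait lives in frame_support):

// Benchmarks ask the configured origin-checker to fabricate an origin that
// will pass its own check, instead of assuming `Signed(caller)`.
#[derive(Clone, Debug, PartialEq)]
enum Origin {
	Root,
	Signed(u64),
}

trait EnsureOrigin {
	// Produce an origin guaranteed to satisfy this checker.
	fn successful_origin() -> Origin;
	fn ensure(o: &Origin) -> Result<(), ()>;
}

struct EnsureSigned;
impl EnsureOrigin for EnsureSigned {
	fn successful_origin() -> Origin {
		Origin::Signed(0)
	}
	fn ensure(o: &Origin) -> Result<(), ()> {
		matches!(o, Origin::Signed(_)).then(|| ()).ok_or(())
	}
}

fn main() {
	// A benchmark written against any `EnsureOrigin` stays valid when the
	// runtime swaps the checker, e.g. to a root-only or collective origin.
	let origin = EnsureSigned::successful_origin();
	assert!(EnsureSigned::ensure(&origin).is_ok());
}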
diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs
index f381f5fe5b709..172b5af999df5 100644
--- a/frame/referenda/src/branch.rs
+++ b/frame/referenda/src/branch.rs
@@ -19,6 +19,7 @@
 
 use super::Config;
 use crate::weights::WeightInfo;
+use frame_support::weights::Weight;
 
 /// Branches within the `begin_deciding` function.
 pub enum BeginDecidingBranch {
@@ -82,7 +83,8 @@ impl ServiceBranch {
 	/// Return the maximum possible weight of the `nudge` function.
 	pub fn max_weight_of_nudge<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::nudge_referendum_no_deposit())
+		Weight::new()
+			.max(T::WeightInfo::nudge_referendum_no_deposit())
 			.max(T::WeightInfo::nudge_referendum_preparing())
 			.max(T::WeightInfo::nudge_referendum_queued())
 			.max(T::WeightInfo::nudge_referendum_not_queued())
@@ -105,7 +107,7 @@ impl ServiceBranch {
 		self,
 	) -> Option<frame_support::weights::Weight> {
 		use ServiceBranch::*;
-		Some(match self {
+		let ref_time_weight = match self {
 			Preparing => T::WeightInfo::place_decision_deposit_preparing(),
 			Queued => T::WeightInfo::place_decision_deposit_queued(),
 			NotQueued => T::WeightInfo::place_decision_deposit_not_queued(),
@@ -122,12 +124,15 @@ impl ServiceBranch {
 			TimedOut | Fail | NoDeposit => return None,
-		})
+		};
+
+		Some(ref_time_weight)
 	}
 
 	/// Return the maximum possible weight of the `place_decision_deposit` function.
 	pub fn max_weight_of_deposit<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::place_decision_deposit_preparing())
+		Weight::new()
+			.max(T::WeightInfo::place_decision_deposit_preparing())
 			.max(T::WeightInfo::place_decision_deposit_queued())
 			.max(T::WeightInfo::place_decision_deposit_not_queued())
 			.max(T::WeightInfo::place_decision_deposit_passing())
@@ -167,7 +172,8 @@ impl OneFewerDecidingBranch {
 	/// Return the maximum possible weight of the `one_fewer_deciding` function.
 	pub fn max_weight<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::one_fewer_deciding_queue_empty())
+		Weight::new()
+			.max(T::WeightInfo::one_fewer_deciding_queue_empty())
 			.max(T::WeightInfo::one_fewer_deciding_passing())
 			.max(T::WeightInfo::one_fewer_deciding_failing())
 	}
diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs
index 0c514f13c095d..cdf9e8077ae6c 100644
--- a/frame/referenda/src/lib.rs
+++ b/frame/referenda/src/lib.rs
@@ -699,7 +699,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		when: T::BlockNumber,
 	) -> Option<(T::BlockNumber, ScheduleAddressOf<T, I>)> {
 		let alarm_interval = T::AlarmInterval::get().max(One::one());
-		let when = (when + alarm_interval - One::one()) / alarm_interval * alarm_interval;
+		let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) /
+			(alarm_interval.saturating_mul(alarm_interval)).max(One::one());
 		let maybe_result = T::Scheduler::schedule(
 			DispatchTime::At(when),
 			None,
@@ -752,7 +753,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 			None
 		};
 		let deciding_status = DecidingStatus { since: now, confirming };
-		let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track);
+		let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track)
+			.max(now.saturating_add(One::one()));
 		status.deciding = Some(deciding_status);
 		let branch =
 			if is_passing { BeginDecidingBranch::Passing } else { BeginDecidingBranch::Failing };
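The line removed from the alarm scheduling above used the usual round-up-to-a-multiple idiom: for integer `k >= 1`, `(when + k - 1) / k * k` is the smallest multiple of `k` not below `when`. The replacement re-expresses the arithmetic with saturating operations plus a `max(One::one())` guard, so the computation cannot overflow or divide by zero. A quick standalone check of the original idiom (plain integers, illustration only):

fn round_up_to_multiple(when: u64, k: u64) -> u64 {
	// Smallest multiple of `k` not less than `when`; requires `k >= 1`.
	(when + k - 1) / k * k
}

fn main() {
	assert_eq!(round_up_to_multiple(10, 4), 12);
	assert_eq!(round_up_to_multiple(12, 4), 12);
	assert_eq!(round_up_to_multiple(13, 4), 16);
}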
diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs
index 6ff9a36b7c3fa..4b76a69720be9 100644
--- a/frame/referenda/src/mock.rs
+++ b/frame/referenda/src/mock.rs
@@ -26,6 +26,7 @@ use frame_support::{
 		ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling,
 		SortedMembers,
 	},
+	weights::Weight,
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_core::H256;
@@ -61,8 +62,9 @@ impl Contains<Call> for BaseFilter {
 }
 
 parameter_types! {
+	pub MaxWeight: Weight = Weight::from_ref_time(2_000_000_000_000);
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1_000_000);
+		frame_system::limits::BlockWeights::simple_max(MaxWeight::get());
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = BaseFilter;
@@ -103,7 +105,7 @@ impl pallet_scheduler::Config for Test {
 	type Origin = Origin;
 	type PalletsOrigin = OriginCaller;
 	type Call = Call;
-	type MaximumWeight = ConstU64<2_000_000_000_000>;
+	type MaximumWeight = MaxWeight;
 	type ScheduleOrigin = EnsureRoot<u64>;
 	type MaxScheduledPerBlock = ConstU32<100>;
 	type WeightInfo = ();
@@ -288,6 +290,9 @@ impl VoteTally<u32, Class> for Tally {
 		let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes;
 		Self { ayes, nays }
 	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn setup(_: Class, _: Perbill) {}
 }
 
 pub fn set_balance_proposal(value: u64) -> Vec<u8> {
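The mock changes are part of the same migration visible throughout this diff: `Weight` stops behaving like a bare `u64`, so integer literals no longer coerce to it and every constant goes through `Weight::from_ref_time`. A dependency-free model of why the wrapper is now required (types invented; the real ones live in `frame_support::weights`):

// Once `Weight` is a struct rather than a `u64` alias, `2_000_000_000_000`
// no longer coerces to it and every literal needs an explicit constructor.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
struct Weight {
	ref_time: u64,
}

impl Weight {
	const fn from_ref_time(ref_time: u64) -> Self {
		Self { ref_time }
	}
}

fn simple_max(w: Weight) -> Weight {
	// Stand-in for `BlockWeights::simple_max`, which now takes a `Weight`.
	w
}

fn main() {
	let max = Weight::from_ref_time(2_000_000_000_000);
	assert_eq!(simple_max(max), max);
}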
diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs
index d48ebb1014d48..84a726d9e4fbe 100644
--- a/frame/referenda/src/weights.rs
+++ b/frame/referenda/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_referenda.
@@ -80,205 +80,205 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Scheduler Agenda (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:0 w:1)
 	fn submit() -> Weight {
-		(34_640_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_640_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_preparing() -> Weight {
-		(44_290_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(44_290_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	fn place_decision_deposit_queued() -> Weight {
-		(49_428_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(49_428_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	fn place_decision_deposit_not_queued() -> Weight {
-		(50_076_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(50_076_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_passing() -> Weight {
-		(55_935_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(55_935_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_failing() -> Weight {
-		(52_921_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(52_921_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	fn refund_decision_deposit() -> Weight {
-		(29_160_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(29_160_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn cancel() -> Weight {
-		(34_972_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_972_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn kill() -> Weight {
-		(60_620_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(60_620_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:0)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	fn one_fewer_deciding_queue_empty() -> Weight {
-		(9_615_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(9_615_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn one_fewer_deciding_failing() -> Weight {
-		(113_077_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(113_077_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn one_fewer_deciding_passing() -> Weight {
-		(114_376_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(114_376_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_requeued_insertion() -> Weight {
-		(43_901_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(43_901_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_requeued_slide() -> Weight {
-		(43_279_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(43_279_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_queued() -> Weight {
-		(45_564_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(45_564_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_not_queued() -> Weight {
-		(45_061_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(4 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(45_061_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_no_deposit() -> Weight {
-		(23_757_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(23_757_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_preparing() -> Weight {
-		(24_781_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(24_781_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	fn nudge_referendum_timed_out() -> Weight {
-		(18_344_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(18_344_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_deciding_failing() -> Weight {
-		(34_752_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_752_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_deciding_passing() -> Weight {
-		(37_055_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(37_055_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_confirming() -> Weight {
-		(31_442_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(31_442_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_end_confirming() -> Weight {
-		(33_201_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(33_201_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_continue_not_confirming() -> Weight {
-		(30_047_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(30_047_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_continue_confirming() -> Weight {
-		(29_195_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(29_195_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	// Storage: Scheduler Lookup (r:1 w:1)
 	// Storage: Preimage StatusFor (r:1 w:1)
 	fn nudge_referendum_approved() -> Weight {
-		(50_119_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(5 as Weight))
-			.saturating_add(T::DbWeight::get().writes(5 as Weight))
+		Weight::from_ref_time(50_119_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_rejected() -> Weight {
-		(32_203_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(32_203_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 }
 
@@ -288,204 +288,204 @@ impl WeightInfo for () {
 	// Storage: Scheduler Agenda (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:0 w:1)
 	fn submit() -> Weight {
-		(34_640_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_640_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_preparing() -> Weight {
-		(44_290_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(44_290_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	fn place_decision_deposit_queued() -> Weight {
-		(49_428_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(49_428_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	fn place_decision_deposit_not_queued() -> Weight {
-		(50_076_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(50_076_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_passing() -> Weight {
-		(55_935_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(55_935_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn place_decision_deposit_failing() -> Weight {
-		(52_921_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(52_921_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	fn refund_decision_deposit() -> Weight {
-		(29_160_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(29_160_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn cancel() -> Weight {
-		(34_972_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_972_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn kill() -> Weight {
-		(60_620_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(60_620_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:0)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	fn one_fewer_deciding_queue_empty() -> Weight {
-		(9_615_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(9_615_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn one_fewer_deciding_failing() -> Weight {
-		(113_077_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(113_077_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	fn one_fewer_deciding_passing() -> Weight {
-		(114_376_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+		Weight::from_ref_time(114_376_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_requeued_insertion() -> Weight {
-		(43_901_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(43_901_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_requeued_slide() -> Weight {
-		(43_279_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(43_279_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_queued() -> Weight {
-		(45_564_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(45_564_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:0)
 	// Storage: Referenda TrackQueue (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_not_queued() -> Weight {
-		(45_061_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(45_061_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_no_deposit() -> Weight {
-		(23_757_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(23_757_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_preparing() -> Weight {
-		(24_781_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(24_781_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	fn nudge_referendum_timed_out() -> Weight {
-		(18_344_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(18_344_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_deciding_failing() -> Weight {
-		(34_752_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(34_752_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Referenda DecidingCount (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_deciding_passing() -> Weight {
-		(37_055_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+		Weight::from_ref_time(37_055_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_begin_confirming() -> Weight {
-		(31_442_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(31_442_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_end_confirming() -> Weight {
-		(33_201_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(33_201_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_continue_not_confirming() -> Weight {
-		(30_047_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(30_047_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_continue_confirming() -> Weight {
-		(29_195_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(29_195_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:2 w:2)
 	// Storage: Scheduler Lookup (r:1 w:1)
 	// Storage: Preimage StatusFor (r:1 w:1)
 	fn nudge_referendum_approved() -> Weight {
-		(50_119_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(5 as Weight))
+		Weight::from_ref_time(50_119_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight))
 	}
 	// Storage: Referenda ReferendumInfoFor (r:1 w:1)
 	// Storage: Scheduler Agenda (r:1 w:1)
 	fn nudge_referendum_rejected() -> Weight {
-		(32_203_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(32_203_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
 	}
 }
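Every regenerated function above has the same shape: a measured base `ref_time` plus the configured cost of the storage reads and writes listed in its comments. A plain-integer model of that composition, using `fn submit()` above as the template (the per-operation DB costs are invented stand-ins for `T::DbWeight::get()`):

// Hypothetical per-operation DB costs; real values come from the runtime's
// `DbWeight` constants and the benchmark run.
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

fn submit_weight() -> u64 {
	// base (measured) + 2 reads + 3 writes, mirroring `fn submit()` above.
	34_640_000u64
		.saturating_add(READ.saturating_mul(2))
		.saturating_add(WRITE.saturating_mul(3))
}

fn main() {
	assert_eq!(submit_weight(), 34_640_000u64 + 2 * 25_000_000 + 3 * 100_000_000);
}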
diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs
index 60a376c5afca5..580b64cc64997 100644
--- a/frame/remark/src/tests.rs
+++ b/frame/remark/src/tests.rs
@@ -28,13 +28,16 @@ fn generates_event() {
 		let caller = 1;
 		let data = vec![0u8; 100];
 		System::set_block_number(System::block_number() + 1); //otherwise event won't be registered.
-		assert_ok!(Remark::<Test>::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),));
+		assert_ok!(Remark::<Test>::store(RawOrigin::Signed(caller).into(), data.clone(),));
 		let events = System::events();
+		// this one we create as we expect it
 		let system_event: <Test as frame_system::Config>::Event = Event::Stored {
 			content_hash: sp_io::hashing::blake2_256(&data).into(),
 			sender: caller,
 		}
 		.into();
+		// this one we actually go into the system pallet and get the last event
+		// because we know it's there from block +1
 		let frame_system::EventRecord { event, .. } = &events[events.len() - 1];
 		assert_eq!(event, &system_event);
 	});
@@ -47,7 +50,7 @@ fn does_not_store_empty() {
 		let data = vec![];
 		System::set_block_number(System::block_number() + 1); //otherwise event won't be registered.
 		assert_noop!(
-			Remark::<Test>::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),),
+			Remark::<Test>::store(RawOrigin::Signed(caller).into(), data.clone(),),
 			Error::<Test>::Empty
 		);
 		assert!(System::events().is_empty());
diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs
index b8bd4618f8def..a098670ccf100 100644
--- a/frame/remark/src/weights.rs
+++ b/frame/remark/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_remark.
@@ -52,10 +52,10 @@ pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn store(l: u32, ) -> Weight {
-		(13_140_000 as Weight)
+		Weight::from_ref_time(13_140_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(l as Weight))
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
 	}
 }
 
@@ -63,9 +63,9 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 impl WeightInfo for () {
 	// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
 	fn store(l: u32, ) -> Weight {
-		(13_140_000 as Weight)
+		Weight::from_ref_time(13_140_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add((1_000 as Weight).saturating_mul(l as Weight))
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
 	}
 }
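`store` is the one weight here with a size-dependent term: the measured base plus a per-byte slope (`13_140_000 + 1_000 * l`), with `scalar_saturating_mul` keeping the multiplication saturating. The same linear model in plain integers (base and slope copied from the diff; the DB read cost is an invented stand-in):

fn store_weight(l: u64) -> u64 {
	const DB_READ: u64 = 25_000_000; // stand-in for `T::DbWeight::get().reads(1)`
	13_140_000u64
		.saturating_add(1_000u64.saturating_mul(l))
		.saturating_add(DB_READ)
}

fn main() {
	// 100 bytes of remark data, as in the `generates_event` test above.
	assert_eq!(store_weight(100), 13_140_000u64 + 100_000 + 25_000_000);
}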
{ let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, when, periodic, priority, call) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -241,7 +245,8 @@ benchmarks! { fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); - }: _(RawOrigin::Root, when, 0) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, when, 0) verify { ensure!( Lookup::::get(u32_to_name(0)).is_none(), @@ -264,7 +269,8 @@ benchmarks! { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, id, when, periodic, priority, call) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(RawOrigin::Root, id, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -277,7 +283,8 @@ benchmarks! { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, u32_to_name(0)) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(RawOrigin::Root, u32_to_name(0)) verify { ensure!( Lookup::::get(u32_to_name(0)).is_none(), diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 8b3769b90b020..560149d73f82c 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -118,7 +118,7 @@ impl Contains for BaseFilter { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); } impl system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index fe17d02b7272c..a332586265430 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -30,7 +30,7 @@ use substrate_test_utils::assert_eq_uvec; #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), @@ -51,7 +51,7 @@ fn basic_scheduling_works() { #[test] fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); let hash = ::Hashing::hash_of(&call); let len = call.using_encoded(|x| x.len()) as u32; let hashed = Preimage::pick(hash.clone(), len); @@ -73,7 +73,7 @@ fn scheduling_with_preimages_works() { fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... 
so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule( @@ -96,7 +96,7 @@ fn schedule_after_works() { fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule( DispatchTime::After(0), @@ -122,7 +122,11 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - Preimage::bound(Call::Logger(logger::Call::log { i: 42, weight: 10 })).unwrap() + Preimage::bound(Call::Logger(logger::Call::log { + i: 42, + weight: Weight::from_ref_time(10) + })) + .unwrap() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -144,7 +148,7 @@ fn periodic_scheduling_works() { #[test] fn reschedule_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule( @@ -182,7 +186,7 @@ fn reschedule_works() { #[test] fn reschedule_named_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -221,7 +225,7 @@ fn reschedule_named_works() { #[test] fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 10 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -277,7 +281,11 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })).unwrap(), + Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -285,7 +293,11 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Preimage::bound(Call::Logger(LoggerCall::log { i: 42, weight: 10 })).unwrap(), + Preimage::bound(Call::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); @@ -307,7 +319,11 @@ fn cancel_named_periodic_scheduling_works() { Some((3, 3)), 127, root(), - Preimage::bound(Call::Logger(LoggerCall::log { i: 42, weight: 10 })).unwrap(), + Preimage::bound(Call::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); // same id results in error. @@ -317,7 +333,11 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })).unwrap(), + Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10) + })) + .unwrap(), ) .is_err()); // different id is ok. 
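One note on the benchmarking hunks a few files back: replacing `RawOrigin::Root` with `T::ScheduleOrigin::successful_origin()` makes the benchmarks valid for runtimes whose `ScheduleOrigin` is something other than root. A toy sketch of the contract being relied on, assuming only the standard `EnsureOrigin` trait (the helper is illustrative, not pallet code; `successful_origin` is the provided method available in benchmarking builds):

```rust
use frame_support::traits::EnsureOrigin;

/// Manufacture an origin value that is guaranteed to pass the `SO` check,
/// which is what `T::ScheduleOrigin::successful_origin()` does for the
/// scheduler benchmarks above.
fn origin_for_bench<O, SO: EnsureOrigin<O>>() -> O {
	// By the `EnsureOrigin` contract, a successful origin always satisfies
	// `try_origin`; asserting it here documents the assumption.
	assert!(SO::try_origin(SO::successful_origin()).is_ok());
	SO::successful_origin()
}
```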
@@ -327,7 +347,11 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })).unwrap(), + Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); @@ -438,7 +462,7 @@ fn on_initialize_weight_is_correct() { let call_weight = 25; // Named - let call = Call::Logger(LoggerCall::log { i: 3, weight: call_weight + 1 }); + let call = Call::Logger(LoggerCall::log { i: 3, weight: call_weight + Weight::one() }); assert_ok!(Scheduler::do_schedule_named( [1u8; 32], DispatchTime::At(3), @@ -447,7 +471,8 @@ fn on_initialize_weight_is_correct() { root(), Preimage::bound(call).unwrap(), )); - let call = Call::Logger(LoggerCall::log { i: 42, weight: call_weight + 2 }); + let call = + Call::Logger(LoggerCall::log { i: 42, weight: call_weight + Weight::from_ref_time(2) }); // Anon Periodic assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), @@ -456,7 +481,8 @@ fn on_initialize_weight_is_correct() { root(), Preimage::bound(call).unwrap(), )); - let call = Call::Logger(LoggerCall::log { i: 69, weight: call_weight + 3 }); + let call = + Call::Logger(LoggerCall::log { i: 69, weight: call_weight + Weight::from_ref_time(3) }); // Anon assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), @@ -466,7 +492,10 @@ fn on_initialize_weight_is_correct() { Preimage::bound(call).unwrap(), )); // Named Periodic - let call = Call::Logger(LoggerCall::log { i: 2600, weight: call_weight + 4 }); + let call = Call::Logger(LoggerCall::log { + i: 2600, + weight: call_weight + Weight::from_ref_time(4), + }); assert_ok!(Scheduler::do_schedule_named( [2u8; 32], DispatchTime::At(1), @@ -483,7 +512,7 @@ fn on_initialize_weight_is_correct() { TestWeightInfo::service_agenda(1) + ::service_task(None, true, true) + TestWeightInfo::execute_dispatch_unsigned() + - call_weight + 4 + call_weight + Weight::from_ref_time(4) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -495,10 +524,10 @@ fn on_initialize_weight_is_correct() { TestWeightInfo::service_agenda(2) + ::service_task(None, false, true) + TestWeightInfo::execute_dispatch_unsigned() + - call_weight + 3 + ::service_task( - None, false, false - ) + TestWeightInfo::execute_dispatch_unsigned() + - call_weight + 2 + call_weight + Weight::from_ref_time(2) + + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(3) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); @@ -510,7 +539,7 @@ fn on_initialize_weight_is_correct() { TestWeightInfo::service_agenda(1) + ::service_task(None, true, false) + TestWeightInfo::execute_dispatch_unsigned() + - call_weight + 1 + call_weight + Weight::one() ); assert_eq!(IncompleteSince::::get(), None); assert_eq!( @@ -530,8 +559,10 @@ fn on_initialize_weight_is_correct() { #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 10 })); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 10 })); + let call = + Box::new(Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(10) })); + let call2 = + Box::new(Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) })); assert_ok!(Scheduler::schedule_named(Origin::root(), [1u8; 32], 4, None, 127, call,)); 
assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); run_to_block(3); @@ -551,9 +582,12 @@ fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { run_to_block(3); - let call1 = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 10 })); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 10 })); - let call3 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 10 })); + let call1 = + Box::new(Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(10) })); + let call2 = + Box::new(Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) })); + let call3 = + Box::new(Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) })); assert_err!( Scheduler::schedule_named(Origin::root(), [1u8; 32], 2, None, 127, call1), @@ -575,8 +609,10 @@ fn fails_to_schedule_task_in_the_past() { #[test] fn should_use_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 10 })); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 10 })); + let call = + Box::new(Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(10) })); + let call2 = + Box::new(Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), [1u8; 32], @@ -601,8 +637,10 @@ fn should_use_orign() { #[test] fn should_check_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 10 })); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 10 })); + let call = + Box::new(Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(10) })); + let call2 = + Box::new(Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) })); assert_noop!( Scheduler::schedule_named( system::RawOrigin::Signed(2).into(), @@ -624,8 +662,14 @@ fn should_check_orign() { #[test] fn should_check_origin_for_cancel() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log_without_filter { i: 69, weight: 10 })); - let call2 = Box::new(Call::Logger(LoggerCall::log_without_filter { i: 42, weight: 10 })); + let call = Box::new(Call::Logger(LoggerCall::log_without_filter { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(Call::Logger(LoggerCall::log_without_filter { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), [1u8; 32], @@ -666,14 +710,20 @@ fn migration_to_v4_works() { Some(ScheduledV1 { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + }), maybe_periodic: None, }), None, Some(ScheduledV1 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 10 }), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + }), maybe_periodic: Some((456u64, 10)), }), ]; @@ -691,8 +741,11 @@ fn migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: None, priority: 10, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, weight: 100 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -701,8 +754,11 @@ fn 
migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -715,8 +771,11 @@ fn migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: None, priority: 11, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, weight: 100 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -725,8 +784,11 @@ fn migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -739,8 +801,11 @@ fn migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: None, priority: 12, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, weight: 100 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -749,8 +814,11 @@ fn migration_to_v4_works() { Some(ScheduledOf:: { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -765,7 +833,6 @@ fn migration_to_v4_works() { } } assert_eq_uvec!(x, expected); - assert_eq!(Scheduler::current_storage_version(), 3); }); } @@ -779,8 +846,11 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, weight: 100 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), @@ -790,8 +860,11 @@ fn test_migrate_origin() { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, origin: 2u32, - call: Preimage::bound(Call::Logger(LoggerCall::log { i: 69, weight: 10 })) - .unwrap(), + call: Preimage::bound(Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), @@ -822,7 +895,7 @@ fn test_migrate_origin() { priority: 10, call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, - weight: 100 + weight: Weight::from_ref_time(100) })) .unwrap(), maybe_periodic: None, @@ -835,7 +908,7 @@ fn test_migrate_origin() { priority: 123, call: Preimage::bound(Call::Logger(LoggerCall::log { i: 69, - weight: 10 + weight: Weight::from_ref_time(10) })) .unwrap(), maybe_periodic: Some((456u64, 10)), @@ -852,7 +925,7 @@ fn test_migrate_origin() { priority: 11, call: Preimage::bound(Call::Logger(LoggerCall::log { i: 96, - weight: 100 + weight: 
Weight::from_ref_time(100)
 						}))
 						.unwrap(),
 					maybe_periodic: None,
@@ -865,7 +938,7 @@ fn test_migrate_origin() {
 					priority: 123,
 					call: Preimage::bound(Call::Logger(LoggerCall::log {
 						i: 69,
-						weight: 10
+						weight: Weight::from_ref_time(10)
 					}))
 					.unwrap(),
 					maybe_periodic: Some((456u64, 10)),
@@ -882,7 +955,7 @@ fn test_migrate_origin() {
 					priority: 12,
 					call: Preimage::bound(Call::Logger(LoggerCall::log {
 						i: 96,
-						weight: 100
+						weight: Weight::from_ref_time(100)
 					}))
 					.unwrap(),
 					maybe_periodic: None,
@@ -895,7 +968,7 @@ fn test_migrate_origin() {
 					priority: 123,
 					call: Preimage::bound(Call::Logger(LoggerCall::log {
 						i: 69,
-						weight: 10
+						weight: Weight::from_ref_time(10)
 					}))
 					.unwrap(),
 					maybe_periodic: Some((456u64, 10)),
diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs
index daf7f8308c83e..f201c89eaf278 100644
--- a/frame/scheduler/src/weights.rs
+++ b/frame/scheduler/src/weights.rs
@@ -39,19 +39,21 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_scheduler.
 pub trait WeightInfo {
-	fn service_agendas() -> Weight;
-	fn service_agenda(i: u32, ) -> Weight;
-	fn service_task_base() -> Weight;
-	fn service_task_periodic() -> Weight;
-	fn service_task_named() -> Weight;
-	fn service_task_fetched(s: u32, ) -> Weight;
-	fn execute_dispatch_signed() -> Weight;
-	fn execute_dispatch_unsigned() -> Weight;
+	fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight;
+	fn on_initialize_named_resolved(s: u32, ) -> Weight;
+	fn on_initialize_periodic_resolved(s: u32, ) -> Weight;
+	fn on_initialize_resolved(s: u32, ) -> Weight;
+	fn on_initialize_named_aborted(s: u32, ) -> Weight;
+	fn on_initialize_aborted(s: u32, ) -> Weight;
+	fn on_initialize_periodic_named(s: u32, ) -> Weight;
+	fn on_initialize_periodic(s: u32, ) -> Weight;
+	fn on_initialize_named(s: u32, ) -> Weight;
+	fn on_initialize(s: u32, ) -> Weight;
 	fn schedule(s: u32, ) -> Weight;
 	fn cancel(s: u32, ) -> Weight;
 	fn schedule_named(s: u32, ) -> Weight;
@@ -61,96 +63,300 @@ pub trait WeightInfo {
 
 /// Weights for pallet_scheduler using the Substrate node and recommended hardware.
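To read the regenerated functions that follow: the leading constant is the base ref-time, the per-`s` term is the marginal cost of one more scheduled task, and the DB reads/writes scale the same way. As a toy check, plugging `s = 2` into `on_initialize_periodic_named_resolved` (same constants as the impl below):

```rust
fn main() {
	let s: u64 = 2; // two scheduled tasks
	let ref_time = 9_994_000 + 19_843_000 * s; // base + slope * s
	let db_reads = 1 + 3 * s; // one fixed read, three more per task
	let db_writes = 1 + 4 * s; // one fixed write, four more per task
	assert_eq!((ref_time, db_reads, db_writes), (49_680_000, 7, 9));
}
```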
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn service_agendas() -> Weight { 0 } - fn service_agenda(_i: u32, ) -> Weight { 0 } - fn service_task_base() -> Weight { 0 } - fn service_task_periodic() -> Weight { 0 } - fn service_task_named() -> Weight { 0 } - fn service_task_fetched(_s: u32, ) -> Weight { 0 } - fn execute_dispatch_signed() -> Weight { 0 } - fn execute_dispatch_unsigned() -> Weight { 0 } - + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(9_994_000 as RefTimeWeight) + // Standard Error: 20_000 + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(10_318_000 as RefTimeWeight) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_periodic_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(11_675_000 as RefTimeWeight) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(11_934_000 as RefTimeWeight) + // Standard Error: 11_000 + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_aborted(s: u32, 
) -> Weight { + Weight::from_ref_time(7_279_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + fn on_initialize_aborted(s: u32, ) -> Weight { + Weight::from_ref_time(8_619_000 as RefTimeWeight) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_periodic_named(s: u32, ) -> Weight { + Weight::from_ref_time(16_129_000 as RefTimeWeight) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + fn on_initialize_periodic(s: u32, ) -> Weight { + Weight::from_ref_time(15_785_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named(s: u32, ) -> Weight { + Weight::from_ref_time(15_778_000 as RefTimeWeight) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + fn on_initialize(s: u32, ) -> Weight { + Weight::from_ref_time(15_912_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - (18_013_000 as Weight) + Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((87_000 as 
Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - (18_131_000 as Weight) + Weight::from_ref_time(18_131_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - (21_230_000 as Weight) + Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - (20_139_000 as Weight) + Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { - fn service_agendas() -> Weight { 0 } - fn service_agenda(_i: u32, ) -> Weight { 0 } - fn service_task_base() -> Weight { 0 } - fn service_task_periodic() -> Weight { 0 } - fn service_task_named() -> Weight { 0 } - fn service_task_fetched(_s: u32, ) -> Weight { 0 } - fn execute_dispatch_signed() -> Weight { 0 } - fn execute_dispatch_unsigned() -> Weight { 0 } - + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(9_994_000 as RefTimeWeight) + // Standard Error: 20_000 + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + 
// Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(10_318_000 as RefTimeWeight) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_periodic_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(11_675_000 as RefTimeWeight) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_resolved(s: u32, ) -> Weight { + Weight::from_ref_time(11_934_000 as RefTimeWeight) + // Standard Error: 11_000 + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_aborted(s: u32, ) -> Weight { + Weight::from_ref_time(7_279_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + fn on_initialize_aborted(s: u32, ) -> Weight { + Weight::from_ref_time(8_619_000 as RefTimeWeight) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Scheduler Lookup (r:0 w:1) + fn 
on_initialize_periodic_named(s: u32, ) -> Weight { + Weight::from_ref_time(16_129_000 as RefTimeWeight) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + fn on_initialize_periodic(s: u32, ) -> Weight { + Weight::from_ref_time(15_785_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named(s: u32, ) -> Weight { + Weight::from_ref_time(15_778_000 as RefTimeWeight) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + } + // Storage: Scheduler Agenda (r:1 w:1) + fn on_initialize(s: u32, ) -> Weight { + Weight::from_ref_time(15_912_000 as RefTimeWeight) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - (18_013_000 as Weight) + Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - (18_131_000 as Weight) + Weight::from_ref_time(18_131_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - 
(21_230_000 as Weight) + Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - (20_139_000 as Weight) + Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index abdb9b2acc9b5..dd96a5df2baf9 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -110,6 +110,7 @@ use sp_std::{fmt::Debug, prelude::*}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated @@ -280,7 +281,7 @@ pub mod pallet { let pool = >::get(); >::refresh_members(pool, ChangeReceiver::MembershipChanged); } - 0 + Weight::zero() } } @@ -346,7 +347,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn kick( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, index: u32, ) -> DispatchResult { T::KickOrigin::ensure_origin(origin)?; @@ -370,7 +371,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn score( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, index: u32, score: T::Score, ) -> DispatchResult { diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 4fef5385eb2c5..e38e0a18b99c8 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! { pub const CandidateDeposit: u64 = 25; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } ord_parameter_types! { pub const KickOrigin: u64 = 2; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index c777f2c56de3a..2181493f72947 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -144,16 +144,6 @@ parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } -pub type Extrinsic = sp_runtime::testing::TestXt; - -impl frame_system::offchain::SendTransactionTypes for Test -where - Call: From, -{ - type OverarchingCall = Call; - type Extrinsic = Extrinsic; -} - pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 2a749f2aae9dd..c72ab8c210d69 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -39,8 +39,8 @@ use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; use sp_std::prelude::*; use sp_trie::{ - trie_types::{TrieDB, TrieDBMutV0}, - MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, + LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; use frame_support::{ @@ -236,7 +236,7 @@ impl ProvingTrie { let mut root = Default::default(); { - let mut trie = TrieDBMutV0::new(&mut db, &mut root); + let mut trie = TrieDBMutBuilderV0::new(&mut db, &mut root).build(); for (i, (validator, full_id)) in validators.into_iter().enumerate() { let i = i as u32; let keys = match >::load_keys(&validator) { @@ -278,19 +278,20 @@ impl ProvingTrie { /// Prove the full verification data for a given key and key ID. pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; - let mut recorder = Recorder::new(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; + let mut recorder = Recorder::>::new(); + { + let trie = + TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); + let val_idx = (key_id, key_data).using_encoded(|s| { + trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) + })?; + + val_idx.using_encoded(|s| { + trie.get(s) + .ok()? + .and_then(|raw| >::decode(&mut &*raw).ok()) + })?; + } Some(recorder.drain().into_iter().map(|r| r.data).collect()) } @@ -303,7 +304,7 @@ impl ProvingTrie { // Check a proof contained within the current memory-db. Returns `None` if the // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; + let trie = TrieDBBuilder::new(&self.db, &self.root).build(); let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 71ee9d1e0758a..34c560984661d 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -574,7 +574,7 @@ pub mod pallet { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in // cache. - 0 + Weight::zero() } } } diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs index 3c687ea7d9d66..c0dce422fe8b5 100644 --- a/frame/session/src/migrations/v1.rs +++ b/frame/session/src/migrations/v1.rs @@ -47,7 +47,7 @@ pub fn migrate::on_chain_storage_version(); @@ -82,7 +82,7 @@ pub fn migrate sp_io::TestExternalities { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index 40ae7f1be4265..5208a679c6bb7 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_session. @@ -55,17 +55,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - (48_484_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_484_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - (38_003_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(38_003_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -75,16 +75,16 @@ impl WeightInfo for () { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - (48_484_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_484_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - (38_003_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(38_003_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 2a6428e754b9d..626562f7a45f8 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -281,6 +281,7 @@ type BalanceOf = type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A vote by a member on a candidate application. 
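scored-pool, society and (later) staking all add the same `AccountIdLookupOf` shorthand, so dispatchables accept the compact lookup source and resolve it themselves. A self-contained sketch of the alias and the resolve-then-use convention these hunks apply (the free function is illustrative, not the pallet's code):

```rust
use frame_support::dispatch::DispatchResult;
use frame_system::{ensure_signed, pallet_prelude::OriginFor};
use sp_runtime::traits::StaticLookup;

// The alias the hunks introduce, written out in full:
type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

fn vouch_sketch<T: frame_system::Config>(
	origin: OriginFor<T>,
	who: AccountIdLookupOf<T>,
) -> DispatchResult {
	let _voucher = ensure_signed(origin)?;
	// Resolve the MultiAddress-style source into a concrete AccountId before
	// doing any checks or storage writes against it.
	let _who = <T as frame_system::Config>::Lookup::lookup(who)?;
	Ok(())
}
```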
#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] @@ -613,7 +614,7 @@ pub mod pallet { fn on_initialize(n: T::BlockNumber) -> Weight { let mut members = vec![]; - let mut weight = 0; + let mut weight = Weight::new(); let weights = T::BlockWeights::get(); // Run a candidate/membership rotation @@ -823,11 +824,12 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vouch( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, value: BalanceOf, tip: BalanceOf, ) -> DispatchResult { let voucher = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; // Check user is not suspended. ensure!(!>::contains_key(&who), Error::::Suspended); ensure!(!>::contains_key(&who), Error::::Suspended); @@ -914,7 +916,7 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vote( origin: OriginFor, - candidate: ::Source, + candidate: AccountIdLookupOf, approve: bool, ) -> DispatchResult { let voter = ensure_signed(origin)?; @@ -1026,11 +1028,12 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn found( origin: OriginFor, - founder: T::AccountId, + founder: AccountIdLookupOf, max_members: u32, rules: Vec, ) -> DispatchResult { T::FounderSetOrigin::ensure_origin(origin)?; + let founder = T::Lookup::lookup(founder)?; ensure!(!>::exists(), Error::::AlreadyFounded); ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... @@ -1104,10 +1107,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_member( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, forgive: bool, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; ensure!(>::contains_key(&who), Error::::NotSuspended); if forgive { @@ -1180,10 +1184,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_candidate( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, judgement: Judgement, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; if let Some((value, kind)) = >::get(&who) { match judgement { Judgement::Approve => { diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 04ea705eed556..ed668e79269fd 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! { pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } ord_parameter_types! 
{ diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 0950478fba089..37d13a54ba5ed 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -71,6 +71,6 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "rand_chacha", - "sp-staking/runtime-benchmarks" + "sp-staking/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index e66f6fde37599..e1ea8aa7b15d5 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -28,7 +28,7 @@ use syn::parse::{Parse, ParseStream}; /// Accepts a number of expressions to create a instance of PiecewiseLinear which represents the /// NPoS curve (as detailed -/// [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model)) +/// [here](https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model)) /// for those parameters. Parameters are: /// - `min_inflation`: the minimal amount to be rewarded between validators, expressed as a fraction /// of total issuance. Known as `I_0` in the literature. Expressed in millionth, must be between 0 diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 12de0ff9cc665..2d5943b51758d 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -83,7 +83,7 @@ pub fn create_validator_with_nominators( let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup = T::Lookup::unlookup(v_stash.clone()); points_total += 10; points_individual.push((v_stash.clone(), 10)); @@ -217,8 +217,7 @@ benchmarks! { bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); - let controller_lookup: ::Source - = T::Lookup::unlookup(controller.clone()); + let controller_lookup = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * 10u32.into(); whitelist_account!(stash); @@ -365,7 +364,7 @@ benchmarks! { 100, Default::default(), )?; - let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); + let stash_lookup = T::Lookup::unlookup(stash.clone()); // they start validating. Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 360d5b5efb58f..38466c8cb1d62 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -309,7 +309,7 @@ use frame_support::{ use scale_info::TypeInfo; use sp_runtime::{ curve::PiecewiseLinear, - traits::{AtLeast32BitUnsigned, Convert, Saturating, Zero}, + traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, Perbill, Perquintill, RuntimeDebug, }; use sp_staking::{ @@ -347,6 +347,8 @@ type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + parameter_types! { pub MaxUnlockingChunks: u32 = 32; } @@ -529,13 +531,26 @@ impl StakingLedger { (self, unlocking_balance) } - /// Slash the staker for a given amount of balance. 
This can grow the value of the slash in the - /// case that either the active bonded or some unlocking chunks become dust after slashing. - /// Returns the amount of funds actually slashed. + /// Slash the staker for a given amount of balance. /// - /// `slash_era` is the era in which the slash (which is being enacted now) actually happened. + /// This implements a proportional slashing system, whereby we set our preference to slash as + /// such: + /// + /// - If any unlocking chunks exist that are scheduled to be unlocked at `slash_era + + /// bonding_duration` and onwards, the slash is divided equally between the active ledger and + /// the unlocking chunks. + /// - If no such chunks exist, then only the active balance is slashed. + /// + /// Note that the above is only a *preference*. If for any reason the active ledger, with or + /// without some portion of the unlocking chunks that are more justified to be slashed are not + /// enough, then the slashing will continue and will consume as much of the active and unlocking + /// chunks as needed. + /// + /// This will never slash more than the given amount. If any of the chunks become dusted, the + /// last chunk is slashed slightly less to compensate. Returns the amount of funds actually + /// slashed. /// - /// # Note + /// `slash_era` is the era in which the slash (which is being enacted now) actually happened. /// /// This calls `Config::OnStakerSlash::on_slash` with information as to how the slash was /// applied. @@ -545,54 +560,81 @@ impl StakingLedger { minimum_balance: BalanceOf, slash_era: EraIndex, ) -> BalanceOf { - use sp_staking::OnStakerSlash as _; - if slash_amount.is_zero() { return Zero::zero() } + use sp_staking::OnStakerSlash as _; let mut remaining_slash = slash_amount; let pre_slash_total = self.total; - let era_after_slash = slash_era + 1; - let chunk_unlock_era_after_slash = era_after_slash + T::BondingDuration::get(); + // for a `slash_era = x`, any chunk that is scheduled to be unlocked at era `x + 28` + // (assuming 28 is the bonding duration) onwards should be slashed. + let slashable_chunks_start = slash_era + T::BondingDuration::get(); - // Calculate the total balance of active funds and unlocking funds in the affected range. - let (affected_balance, slash_chunks_priority): (_, Box>) = { - if let Some(start_index) = - self.unlocking.iter().position(|c| c.era >= chunk_unlock_era_after_slash) + // `Some(ratio)` if this is proportional, with `ratio`, `None` otherwise. In both cases, we + // slash first the active chunk, and then `slash_chunks_priority`. + let (maybe_proportional, slash_chunks_priority) = { + if let Some(first_slashable_index) = + self.unlocking.iter().position(|c| c.era >= slashable_chunks_start) { + // If there exists a chunk who's after the first_slashable_start, then this is a + // proportional slash, because we want to slash active and these chunks + // proportionally. + // The indices of the first chunk after the slash up through the most recent chunk. 
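A toy check of the proportional branch described above, with plain integers standing in for `Balance` and `Perquintill` (all values illustrative):

```rust
fn main() {
	let active = 100u128; // active ledger balance
	let chunks = [40u128, 60]; // both unlock at/after slash_era + bonding_duration
	let slash = 50u128;

	// Affected balance is the active ledger plus every slashable chunk.
	let affected: u128 = active + chunks.iter().sum::<u128>(); // 200
	// ratio = slash / affected = 25%, applied to each affected pot:
	assert_eq!(slash * active / affected, 25); // taken from the active ledger
	assert_eq!(slash * chunks[0] / affected, 10); // from the first chunk
	assert_eq!(slash * chunks[1] / affected, 15); // from the second chunk
	// The shares sum back to the requested slash, so it is never exceeded.
	assert_eq!(25 + 10 + 15, slash);
}
```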
// (The most recent chunk is at greatest from this era) - let affected_indices = start_index..self.unlocking.len(); + let affected_indices = first_slashable_index..self.unlocking.len(); let unbonding_affected_balance = affected_indices.clone().fold(BalanceOf::::zero(), |sum, i| { - if let Some(chunk) = self.unlocking.get_mut(i).defensive() { + if let Some(chunk) = self.unlocking.get(i).defensive() { sum.saturating_add(chunk.value) } else { sum } }); + let affected_balance = self.active.saturating_add(unbonding_affected_balance); + let ratio = Perquintill::from_rational(slash_amount, affected_balance); ( - self.active.saturating_add(unbonding_affected_balance), - Box::new(affected_indices.chain((0..start_index).rev())), + Some(ratio), + affected_indices.chain((0..first_slashable_index).rev()).collect::>(), ) } else { - (self.active, Box::new((0..self.unlocking.len()).rev())) + // We just slash from the last chunk to the most recent one, if need be. + (None, (0..self.unlocking.len()).rev().collect::>()) } }; // Helper to update `target` and the ledgers total after accounting for slashing `target`. - let ratio = Perquintill::from_rational(slash_amount, affected_balance); + log!( + debug, + "slashing {:?} for era {:?} out of {:?}, priority: {:?}, proportional = {:?}", + slash_amount, + slash_era, + self, + slash_chunks_priority, + maybe_proportional, + ); + let mut slash_out_of = |target: &mut BalanceOf, slash_remaining: &mut BalanceOf| { - let mut slash_from_target = - if slash_amount < affected_balance { ratio * (*target) } else { *slash_remaining } - .min(*target); + let mut slash_from_target = if let Some(ratio) = maybe_proportional { + ratio * (*target) + } else { + *slash_remaining + } + // this is the total that that the slash target has. We can't slash more than + // this anyhow! + .min(*target) + // this is the total amount that we would have wanted to slash + // non-proportionally, a proportional slash should never exceed this either! + .min(*slash_remaining); // slash out from *target exactly `slash_from_target`. *target = *target - slash_from_target; if *target < minimum_balance { - // Slash the rest of the target if its dust + // Slash the rest of the target if it's dust. This might cause the last chunk to be + // slightly under-slashed, by at most `MaxUnlockingChunks * ED`, which is not a big + // deal. slash_from_target = sp_std::mem::replace(target, Zero::zero()).saturating_add(slash_from_target) } @@ -606,10 +648,11 @@ impl StakingLedger { let mut slashed_unlocking = BTreeMap::<_, _>::new(); for i in slash_chunks_priority { + if remaining_slash.is_zero() { + break + } + if let Some(chunk) = self.unlocking.get_mut(i).defensive() { - if remaining_slash.is_zero() { - break - } slash_out_of(&mut chunk.value, &mut remaining_slash); // write the new slashed value of this chunk to the map. slashed_unlocking.insert(chunk.era, chunk.value); @@ -618,7 +661,9 @@ impl StakingLedger { } } + // clean unlocking chunks that are set to zero. self.unlocking.retain(|c| !c.value.is_zero()); + T::OnStakerSlash::on_slash(&self.stash, self.active, &slashed_unlocking); pre_slash_total.saturating_sub(self.total) } @@ -829,16 +874,17 @@ enum Releases { V2_0_0, V3_0_0, V4_0_0, - V5_0_0, // blockable validators. - V6_0_0, // removal of all storage associated with offchain phragmen. - V7_0_0, // keep track of number of nominators / validators in map - V8_0_0, // populate `VoterList`. - V9_0_0, // inject validators into `VoterList` as well. + V5_0_0, // blockable validators. 
+	V6_0_0,  // removal of all storage associated with offchain phragmen.
+	V7_0_0,  // keep track of number of nominators / validators in map
+	V8_0_0,  // populate `VoterList`.
+	V9_0_0,  // inject validators into `VoterList` as well.
+	V10_0_0, // remove `EarliestUnappliedSlash`.
 }
 
 impl Default for Releases {
 	fn default() -> Self {
-		Releases::V8_0_0
+		Releases::V10_0_0
 	}
 }
diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs
index 14846da8a5d54..7e3bf6ccb93e1 100644
--- a/frame/staking/src/migrations.rs
+++ b/frame/staking/src/migrations.rs
@@ -20,6 +20,45 @@ use super::*;
 use frame_election_provider_support::SortedListProvider;
 use frame_support::traits::OnRuntimeUpgrade;
 
+pub mod v10 {
+	use super::*;
+	use frame_support::storage_alias;
+
+	#[storage_alias]
+	type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
+
+	/// Apply any pending slashes that were queued.
+	///
+	/// That means we might slash someone a bit too early, but we definitely
+	/// won't forget to slash them. The cap of 512 is somewhat randomly taken to
+	/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
+	pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
+		fn on_runtime_upgrade() -> frame_support::weights::Weight {
+			if StorageVersion::<T>::get() == Releases::V9_0_0 {
+				let pending_slashes = <Pallet<T> as Store>::UnappliedSlashes::iter().take(512);
+				for (era, slashes) in pending_slashes {
+					for slash in slashes {
+						// in the old slashing scheme, the slash era was the key at which we read
+						// from `UnappliedSlashes`.
+						log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
+						slashing::apply_slash::<T>(slash, era);
+					}
+				}
+
+				EarliestUnappliedSlash::<T>::kill();
+				StorageVersion::<T>::put(Releases::V10_0_0);
+
+				log!(info, "MigrateToV10 executed successfully");
+				T::DbWeight::get().reads_writes(1, 1)
+			} else {
+				log!(warn, "MigrateToV10 should be removed.");
+				T::DbWeight::get().reads(1)
+			}
+		}
+	}
+}
+
 pub mod v9 {
 	use super::*;
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs
index bd2d8cdc32ce9..7911428b3337c 100644
--- a/frame/staking/src/mock.rs
+++ b/frame/staking/src/mock.rs
@@ -33,7 +33,7 @@ use sp_core::H256;
 use sp_io;
 use sp_runtime::{
 	curve::PiecewiseLinear,
-	testing::{Header, TestXt, UintAuthorityId},
+	testing::{Header, UintAuthorityId},
 	traits::{IdentityLookup, Zero},
 };
 use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler};
@@ -304,15 +304,6 @@ impl crate::pallet::pallet::Config for Test {
 	type WeightInfo = ();
 }
 
-impl<LocalCall> frame_system::offchain::SendTransactionTypes<LocalCall> for Test
-where
-	Call: From<LocalCall>,
-{
-	type OverarchingCall = Call;
-	type Extrinsic = Extrinsic;
-}
-
-pub type Extrinsic = TestXt<Call, ()>;
 pub(crate) type StakingCall = crate::Call<Test>;
 pub(crate) type TestRuntimeCall = <Test as frame_system::Config>::Call;
 
@@ -549,6 +540,7 @@ impl ExtBuilder {
 		ext
 	}
 	pub fn build_and_execute(self, test: impl FnOnce() -> ()) {
+		sp_tracing::try_init_simple();
 		let mut ext = self.build();
 		ext.execute_with(test);
 		ext.execute_with(post_conditions);
@@ -834,7 +826,7 @@ pub(crate) fn on_offence_now(
 pub(crate) fn add_slash(who: &AccountId) {
 	on_offence_now(
 		&[OffenceDetails {
-			offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())),
+			offender: (*who, Staking::eras_stakers(active_era(), *who)),
 			reporters: vec![],
 		}],
 		&[Perbill::from_percent(10)],
@@ -884,6 +876,20 @@ pub(crate) fn staking_events() -> Vec<crate::Event<Test>> {
 		.collect()
 }
 
+parameter_types!
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs
index bd2d8cdc32ce9..7911428b3337c 100644
--- a/frame/staking/src/mock.rs
+++ b/frame/staking/src/mock.rs
@@ -33,7 +33,7 @@ use sp_core::H256;
 use sp_io;
 use sp_runtime::{
 	curve::PiecewiseLinear,
-	testing::{Header, TestXt, UintAuthorityId},
+	testing::{Header, UintAuthorityId},
 	traits::{IdentityLookup, Zero},
 };
 use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler};
@@ -304,15 +304,6 @@ impl crate::pallet::pallet::Config for Test {
 	type WeightInfo = ();
 }

-impl<LocalCall> frame_system::offchain::SendTransactionTypes<LocalCall> for Test
-where
-	Call: From<LocalCall>,
-{
-	type OverarchingCall = Call;
-	type Extrinsic = Extrinsic;
-}
-
-pub type Extrinsic = TestXt<Call, ()>;
 pub(crate) type StakingCall = crate::Call<Test>;
 pub(crate) type TestRuntimeCall = <Test as frame_system::Config>::Call;

@@ -549,6 +540,7 @@ impl ExtBuilder {
 		ext
 	}
 	pub fn build_and_execute(self, test: impl FnOnce() -> ()) {
+		sp_tracing::try_init_simple();
 		let mut ext = self.build();
 		ext.execute_with(test);
 		ext.execute_with(post_conditions);
@@ -834,7 +826,7 @@ pub(crate) fn on_offence_now(
 pub(crate) fn add_slash(who: &AccountId) {
 	on_offence_now(
 		&[OffenceDetails {
-			offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())),
+			offender: (*who, Staking::eras_stakers(active_era(), *who)),
 			reporters: vec![],
 		}],
 		&[Perbill::from_percent(10)],
@@ -884,6 +876,20 @@ pub(crate) fn staking_events() -> Vec<crate::Event<Test>> {
 		.collect()
 }

+parameter_types! {
+	static StakingEventsIndex: usize = 0;
+}
+
+pub(crate) fn staking_events_since_last_call() -> Vec<crate::Event<Test>> {
+	let all: Vec<_> = System::events()
+		.into_iter()
+		.filter_map(|r| if let Event::Staking(inner) = r.event { Some(inner) } else { None })
+		.collect();
+	let seen = StakingEventsIndex::get();
+	StakingEventsIndex::set(all.len());
+	all.into_iter().skip(seen).collect()
+}
+
 pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) {
 	(Balances::free_balance(who), Balances::reserved_balance(who))
 }
diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs
index 7656eec80a5ff..2a55d3baea2e6 100644
--- a/frame/staking/src/pallet/impls.rs
+++ b/frame/staking/src/pallet/impls.rs
@@ -32,7 +32,7 @@ use frame_support::{
 use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
 use pallet_session::historical;
 use sp_runtime::{
-	traits::{Bounded, Convert, SaturatedConversion, Saturating, StaticLookup, Zero},
+	traits::{Bounded, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero},
 	Perbill,
 };
 use sp_staking::{
@@ -599,20 +599,17 @@ impl<T: Config> Pallet<T> {
 	/// Apply previously-unapplied slashes on the beginning of a new era, after a delay.
 	fn apply_unapplied_slashes(active_era: EraIndex) {
-		let slash_defer_duration = T::SlashDeferDuration::get();
-		<Self as Store>::EarliestUnappliedSlash::mutate(|earliest| {
-			if let Some(ref mut earliest) = earliest {
-				let keep_from = active_era.saturating_sub(slash_defer_duration);
-				for era in (*earliest)..keep_from {
-					let era_slashes = <Self as Store>::UnappliedSlashes::take(&era);
-					for slash in era_slashes {
-						slashing::apply_slash::<T>(slash, era);
-					}
-				}
-
-				*earliest = (*earliest).max(keep_from)
-			}
-		})
+		let era_slashes = <Self as Store>::UnappliedSlashes::take(&active_era);
+		log!(
+			debug,
+			"found {} slashes scheduled to be executed in era {:?}",
+			era_slashes.len(),
+			active_era,
+		);
+		for slash in era_slashes {
+			let slash_era = active_era.saturating_sub(T::SlashDeferDuration::get());
+			slashing::apply_slash::<T>(slash, slash_era);
+		}
 	}
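Taken together with the scheduling change just below, the deferred-slash bookkeeping is now keyed by application era rather than offence era. A standalone sketch of the arithmetic, with made-up numbers (an illustration, not code from the diff):

type EraIndex = u32;

// A slash for offence era `slash_era`, deferred by `defer_duration` full eras,
// is now stored under (and applied at the start of) this era.
fn application_era(slash_era: EraIndex, defer_duration: EraIndex) -> EraIndex {
	slash_era.saturating_add(defer_duration).saturating_add(1)
}

fn main() {
	// With `SlashDeferDuration = 2`: an offence in era 1 is applied at the
	// start of era 4, matching the test comments further down in this diff.
	assert_eq!(application_era(1, 2), 4);
	// A retroactive report for era 2 lands at the start of era 5.
	assert_eq!(application_era(2, 2), 5);
}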
 	/// Add reward points to validators using their stash account ID.
@@ -1170,7 +1167,7 @@ where
 		disable_strategy: DisableStrategy,
 	) -> Weight {
 		let reward_proportion = SlashRewardFraction::<T>::get();
-		let mut consumed_weight: Weight = 0;
+		let mut consumed_weight = Weight::from_ref_time(0);
 		let mut add_db_reads_writes = |reads, writes| {
 			consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
 		};
@@ -1209,11 +1206,6 @@ where
 				}
 			};

-			<Self as Store>::EarliestUnappliedSlash::mutate(|earliest| {
-				if earliest.is_none() {
-					*earliest = Some(active_era)
-				}
-			});
 			add_db_reads_writes(1, 1);

 			let slash_defer_duration = T::SlashDeferDuration::get();
@@ -1263,9 +1255,18 @@
 				} else {
 					// Defer to end of some `slash_defer_duration` from now.
-					<Self as Store>::UnappliedSlashes::mutate(active_era, move |for_later| {
-						for_later.push(unapplied)
-					});
+					log!(
+						debug,
+						"deferring slash of {:?}% that happened in era {:?} (reported in era {:?}) to era {:?}",
+						slash_fraction,
+						slash_era,
+						active_era,
+						slash_era + slash_defer_duration + 1,
+					);
+					<Self as Store>::UnappliedSlashes::mutate(
+						slash_era.saturating_add(slash_defer_duration).saturating_add(One::one()),
+						move |for_later| for_later.push(unapplied),
+					);
 					add_db_reads_writes(1, 1);
 				}
 			} else {
diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs
index e53464195de23..1f11f0ff00ac1 100644
--- a/frame/staking/src/pallet/mod.rs
+++ b/frame/staking/src/pallet/mod.rs
@@ -27,7 +27,7 @@ use frame_support::{
 	},
 	weights::Weight,
 };
-use frame_system::{ensure_root, ensure_signed, offchain::SendTransactionTypes, pallet_prelude::*};
+use frame_system::{ensure_root, ensure_signed, pallet_prelude::*};
 use sp_runtime::{
 	traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero},
 	Perbill, Percent,
@@ -40,10 +40,10 @@ mod impls;
 pub use impls::*;

 use crate::{
-	slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, EraRewardPoints, Exposure,
-	Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, Releases,
-	RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
-	ValidatorPrefs,
+	slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
+	EraRewardPoints, Exposure, Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations,
+	PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger,
+	UnappliedSlash, UnlockChunk, ValidatorPrefs,
 };

 const STAKING_ID: LockIdentifier = *b"staking ";
@@ -73,7 +73,7 @@ pub mod pallet {
 	}

 	#[pallet::config]
-	pub trait Config: frame_system::Config + SendTransactionTypes<Call<Self>> {
+	pub trait Config: frame_system::Config {
 		/// The staking balance.
 		type Currency: LockableCurrency<
 			Self::AccountId,
@@ -477,10 +477,6 @@ pub mod pallet {
 		ValueQuery,
 	>;

-	/// The earliest era for which we have a pending, unapplied slash.
-	#[pallet::storage]
-	pub(crate) type EarliestUnappliedSlash<T> = StorageValue<_, EraIndex>;
-
 	/// The last planned session scheduled by the session pallet.
 	///
 	/// This is basically in sync with the call to [`pallet_session::SessionManager::new_session`].
@@ -772,7 +768,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::bond())]
 		pub fn bond(
 			origin: OriginFor<T>,
-			controller: <T::Lookup as StaticLookup>::Source,
+			controller: AccountIdLookupOf<T>,
 			#[pallet::compact] value: BalanceOf<T>,
 			payee: RewardDestination<T::AccountId>,
 		) -> DispatchResult {
@@ -1059,7 +1055,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))]
 		pub fn nominate(
 			origin: OriginFor<T>,
-			targets: Vec<<T::Lookup as StaticLookup>::Source>,
+			targets: Vec<AccountIdLookupOf<T>>,
 		) -> DispatchResult {
 			let controller = ensure_signed(origin)?;
@@ -1179,7 +1175,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::set_controller())]
 		pub fn set_controller(
 			origin: OriginFor<T>,
-			controller: <T::Lookup as StaticLookup>::Source,
+			controller: AccountIdLookupOf<T>,
 		) -> DispatchResult {
 			let stash = ensure_signed(origin)?;
 			let old_controller = Self::bonded(&stash).ok_or(Error::<T>::NotStash)?;
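The repeated `<T::Lookup as StaticLookup>::Source` spelling above is replaced by the `AccountIdLookupOf<T>` alias, which this PR introduces outside the hunks shown here; presumably it is just a type shorthand along these lines:

// Illustrative reconstruction; the alias itself lives in frame/staking/src/lib.rs,
// outside this excerpt.
pub type AccountIdLookupOf<T> =
	<<T as frame_system::Config>::Lookup as sp_runtime::traits::StaticLookup>::Source;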
@@ -1525,10 +1521,7 @@ pub mod pallet {
 		/// Note: Making this call only makes sense if you first set the validator preferences to
 		/// block any further nominations.
 		#[pallet::weight(T::WeightInfo::kick(who.len() as u32))]
-		pub fn kick(
-			origin: OriginFor<T>,
-			who: Vec<<T::Lookup as StaticLookup>::Source>,
-		) -> DispatchResult {
+		pub fn kick(origin: OriginFor<T>, who: Vec<AccountIdLookupOf<T>>) -> DispatchResult {
 			let controller = ensure_signed(origin)?;
 			let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
 			let stash = &ledger.stash;
diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs
index ba67292ddc434..0e0ac76523471 100644
--- a/frame/staking/src/testing_utils.rs
+++ b/frame/staking/src/testing_utils.rs
@@ -78,8 +78,7 @@ pub fn create_stash_controller<T: Config>(
 ) -> Result<(T::AccountId, T::AccountId), &'static str> {
 	let stash = create_funded_user::<T>("stash", n, balance_factor);
 	let controller = create_funded_user::<T>("controller", n, balance_factor);
-	let controller_lookup: <T::Lookup as StaticLookup>::Source =
-		T::Lookup::unlookup(controller.clone());
+	let controller_lookup = T::Lookup::unlookup(controller.clone());
 	let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into();
 	Staking::<T>::bond(
 		RawOrigin::Signed(stash.clone()).into(),
@@ -98,8 +97,7 @@ pub fn create_stash_controller_with_balance<T: Config>(
 ) -> Result<(T::AccountId, T::AccountId), &'static str> {
 	let stash = create_funded_user_with_balance::<T>("stash", n, balance);
 	let controller = create_funded_user_with_balance::<T>("controller", n, balance);
-	let controller_lookup: <T::Lookup as StaticLookup>::Source =
-		T::Lookup::unlookup(controller.clone());
+	let controller_lookup = T::Lookup::unlookup(controller.clone());

 	Staking::<T>::bond(
 		RawOrigin::Signed(stash.clone()).into(),
@@ -120,8 +118,7 @@ pub fn create_stash_and_dead_controller<T: Config>(
 	let stash = create_funded_user::<T>("stash", n, balance_factor);
 	// controller has no funds
 	let controller = create_funded_user::<T>("controller", n, 0);
-	let controller_lookup: <T::Lookup as StaticLookup>::Source =
-		T::Lookup::unlookup(controller.clone());
+	let controller_lookup = T::Lookup::unlookup(controller.clone());
 	let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into();
 	Staking::<T>::bond(
 		RawOrigin::Signed(stash.clone()).into(),
@@ -136,7 +133,7 @@ pub fn create_validators<T: Config>(
 	max: u32,
 	balance_factor: u32,
-) -> Result<Vec<<T::Lookup as StaticLookup>::Source>, &'static str> {
+) -> Result<Vec<AccountIdLookupOf<T>>, &'static str> {
 	create_validators_with_seed::<T>(max, balance_factor, 0)
 }

@@ -145,15 +142,15 @@ pub fn create_validators_with_seed<T: Config>(
 	max: u32,
 	balance_factor: u32,
 	seed: u32,
-) -> Result<Vec<<T::Lookup as StaticLookup>::Source>, &'static str> {
-	let mut validators: Vec<<T::Lookup as StaticLookup>::Source> = Vec::with_capacity(max as usize);
+) -> Result<Vec<AccountIdLookupOf<T>>, &'static str> {
+	let mut validators: Vec<AccountIdLookupOf<T>> = Vec::with_capacity(max as usize);
 	for i in 0..max {
 		let (stash, controller) =
 			create_stash_controller::<T>(i + seed, balance_factor, RewardDestination::Staked)?;
 		let validator_prefs =
 			ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() };
 		Staking::<T>::validate(RawOrigin::Signed(controller).into(), validator_prefs)?;
-		let stash_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(stash);
+		let stash_lookup = T::Lookup::unlookup(stash);
 		validators.push(stash_lookup);
 	}
 	Ok(validators)
 }
@@ -180,11 +177,10 @@ pub fn create_validators_with_nominators_for_era<T: Config>(
 	edge_per_nominator: usize,
 	randomize_stake: bool,
 	to_nominate: Option<u32>,
-) -> Result<Vec<<T::Lookup as StaticLookup>::Source>, &'static str> {
+) -> Result<Vec<AccountIdLookupOf<T>>, &'static str> {
 	clear_validators_and_nominators::<T>();

-	let mut validators_stash: Vec<<T::Lookup as StaticLookup>::Source> =
-		Vec::with_capacity(validators as usize);
+	let mut validators_stash: Vec<AccountIdLookupOf<T>> = Vec::with_capacity(validators as usize);
 	let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256));

 	// Create validators
@@ -195,8 +191,7 @@ pub fn create_validators_with_nominators_for_era<T: Config>(
 		let validator_prefs =
 			ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() };
 		Staking::<T>::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?;
-		let stash_lookup: <T::Lookup as StaticLookup>::Source =
-			T::Lookup::unlookup(v_stash.clone());
+		let stash_lookup = T::Lookup::unlookup(v_stash.clone());
 		validators_stash.push(stash_lookup.clone());
 	}

@@ -211,7 +206,7 @@ pub fn create_validators_with_nominators_for_era<T: Config>(
 		// Have them randomly validate
 		let mut available_validators = validator_chosen.clone();
-		let mut selected_validators: Vec<<T::Lookup as StaticLookup>::Source> =
+		let mut selected_validators: Vec<AccountIdLookupOf<T>> =
 			Vec::with_capacity(edge_per_nominator);

 		for _ in 0..validators.min(edge_per_nominator as u32) {
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index 9a13a818f4b59..cda606008a9cc 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -1082,10 +1082,7 @@ fn validator_payment_prefs_work() {
 	// This test will focus on validator payment.
 	ExtBuilder::default().build_and_execute(|| {
 		let commission = Perbill::from_percent(40);
-		<Validators<Test>>::insert(
-			&11,
-			ValidatorPrefs { commission: commission.clone(), ..Default::default() },
-		);
+		<Validators<Test>>::insert(&11, ValidatorPrefs { commission, ..Default::default() });

 		// Reward controller so staked ratio doesn't change.
 		<Payee<Test>>::insert(&11, RewardDestination::Controller);
@@ -2081,8 +2078,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		let _ = Balances::make_free_balance_be(&11, stake);
 		let _ = Balances::make_free_balance_be(&2, stake);

-		// only slashes out of bonded stake are applied. without this line,
-		// it is 0.
+		// only slashes out of bonded stake are applied. without this line, it is 0.
 		Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap();
 		// Override exposure of 11
 		ErasStakers::<Test>::insert(
@@ -2104,7 +2100,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 			&[Perbill::from_percent(100)],
 		);

-		assert_eq!(Balances::total_balance(&11), stake);
+		assert_eq!(Balances::total_balance(&11), stake - 1);
 		assert_eq!(Balances::total_balance(&2), 1);
 	})
 }
@@ -2778,12 +2774,103 @@ fn deferred_slashes_are_deferred() {
 		assert_eq!(Balances::free_balance(11), 1000);
 		assert_eq!(Balances::free_balance(101), 2000);

+		System::reset_events();
+
 		// at the start of era 4, slashes from era 1 are processed,
 		// after being deferred for at least 2 full eras.
 		mock::start_active_era(4);

 		assert_eq!(Balances::free_balance(11), 900);
 		assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				Event::StakersElected,
+				Event::EraPaid(3, 11075, 33225),
+				Event::Slashed(11, 100),
+				Event::Slashed(101, 12)
+			]
+		);
+	})
+}
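The event assertions above lean on the `staking_events_since_last_call` helper added to mock.rs earlier in this diff. Its mechanism is easy to reproduce in isolation; a standalone sketch, independent of the test runtime:

use std::cell::Cell;

thread_local! {
	// Plays the role of `StakingEventsIndex` above: how many events have
	// already been reported by a previous call.
	static SEEN: Cell<usize> = Cell::new(0);
}

/// Return only the events appended since the previous call.
fn events_since_last_call<E: Clone>(all: &[E]) -> Vec<E> {
	SEEN.with(|seen| {
		let skip = seen.get();
		seen.set(all.len());
		all[skip..].to_vec()
	})
}

fn main() {
	let mut log = vec!["StakersElected"];
	assert_eq!(events_since_last_call(&log), vec!["StakersElected"]);
	log.push("Slashed");
	assert_eq!(events_since_last_call(&log), vec!["Slashed"]);
}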
+
+#[test]
+fn retroactive_deferred_slashes_two_eras_before() {
+	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
+		assert_eq!(BondingDuration::get(), 3);
+
+		mock::start_active_era(1);
+		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11);
+
+		mock::start_active_era(3);
+		on_offence_in_era(
+			&[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }],
+			&[Perbill::from_percent(10)],
+			1, // should be deferred for two full eras, and applied at the beginning of era 4.
+			DisableStrategy::Never,
+		);
+		System::reset_events();
+
+		mock::start_active_era(4);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				Event::StakersElected,
+				Event::EraPaid(3, 7100, 21300),
+				Event::Slashed(11, 100),
+				Event::Slashed(101, 12)
+			]
+		);
+	})
+}
+
+#[test]
+fn retroactive_deferred_slashes_one_before() {
+	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
+		assert_eq!(BondingDuration::get(), 3);
+
+		mock::start_active_era(1);
+		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11);
+
+		// unbond at slash era.
+		mock::start_active_era(2);
+		assert_ok!(Staking::chill(Origin::signed(10)));
+		assert_ok!(Staking::unbond(Origin::signed(10), 100));
+
+		mock::start_active_era(3);
+		on_offence_in_era(
+			&[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }],
+			&[Perbill::from_percent(10)],
+			2, // should be deferred for two full eras, and applied at the beginning of era 5.
+			DisableStrategy::Never,
+		);
+		System::reset_events();
+
+		mock::start_active_era(4);
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)]
+		);
+
+		assert_eq!(Staking::ledger(10).unwrap().total, 1000);
+		// slash happens after the next line.
+		mock::start_active_era(5);
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				Event::StakersElected,
+				Event::EraPaid(4, 11075, 33225),
+				Event::Slashed(11, 100),
+				Event::Slashed(101, 12)
+			]
+		);
+
+		// their ledger has already been slashed.
+		assert_eq!(Staking::ledger(10).unwrap().total, 900);
+		assert_ok!(Staking::unbond(Origin::signed(10), 1000));
+		assert_eq!(Staking::ledger(10).unwrap().total, 900);
 	})
 }

@@ -2872,6 +2959,7 @@ fn remove_deferred() {
 		assert_eq!(Balances::free_balance(101), 2000);

 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;

+		// deferred to start of era 4.
 		on_offence_now(
 			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(10)],
@@ -2882,6 +2970,7 @@

 		mock::start_active_era(2);

+		// reported later, but deferred to start of era 4 as well.
 		on_offence_in_era(
 			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(15)],
@@ -2895,7 +2984,8 @@
 			Error::<Test>::EmptyTargets
 		);

-		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0]));
+		// cancel one of them.
+		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0]));

 		assert_eq!(Balances::free_balance(11), 1000);
 		assert_eq!(Balances::free_balance(101), 2000);
@@ -2907,13 +2997,19 @@
 		// at the start of era 4, slashes from era 1 are processed,
 		// after being deferred for at least 2 full eras.
+		System::reset_events();
 		mock::start_active_era(4);

-		// the first slash for 10% was cancelled, so no effect.
-		assert_eq!(Balances::free_balance(11), 1000);
-		assert_eq!(Balances::free_balance(101), 2000);
-
-		mock::start_active_era(5);
+		// the first slash for 10% was cancelled, but the 15% one is applied.
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				Event::StakersElected,
+				Event::EraPaid(3, 11075, 33225),
+				Event::Slashed(11, 50),
+				Event::Slashed(101, 7)
+			]
+		);

 		let slash_10 = Perbill::from_percent(10);
 		let slash_15 = Perbill::from_percent(15);
@@ -2966,7 +3062,7 @@ fn remove_multi_deferred() {
 			&[Perbill::from_percent(25)],
 		);

-		assert_eq!(<Staking as Store>::UnappliedSlashes::get(&1).len(), 5);
+		assert_eq!(<Staking as Store>::UnappliedSlashes::get(&4).len(), 5);

 		// fails if list is not sorted
 		assert_noop!(
@@ -2984,9 +3080,9 @@
 			Error::<Test>::InvalidSlashIndex
 		);

-		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0, 2, 4]));
+		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0, 2, 4]));

-		let slashes = <Staking as Store>::UnappliedSlashes::get(&1);
+		let slashes = <Staking as Store>::UnappliedSlashes::get(&4);
 		assert_eq!(slashes.len(), 2);
 		assert_eq!(slashes[0].validator, 21);
 		assert_eq!(slashes[1].validator, 42);
@@ -3663,7 +3759,7 @@ fn payout_stakers_handles_weight_refund() {
 	let half_max_nom_rewarded_weight =
 		<Test as Config>::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded);
 	let zero_nom_payouts_weight = <Test as Config>::WeightInfo::payout_stakers_alive_staked(0);
-	assert!(zero_nom_payouts_weight > 0);
+	assert!(zero_nom_payouts_weight > Weight::zero());
 	assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight);
 	assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight);

@@ -3802,42 +3898,68 @@ fn bond_during_era_correctly_populates_claimed_rewards() {
 fn offences_weight_calculated_correctly() {
 	ExtBuilder::default().nominate(true).build_and_execute(|| {
 		// On offence with zero offenders: 4 Reads, 1 Write
-		let zero_offence_weight = <Test as frame_system::Config>::DbWeight::get().reads_writes(4, 1);
-		assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), zero_offence_weight);
+		let zero_offence_weight =
+			<Test as frame_system::Config>::DbWeight::get().reads_writes(4, 1);
+		assert_eq!(
+			Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed),
+			zero_offence_weight
+		);

 		// On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes
-		let n_offence_unapplied_weight = <Test as frame_system::Config>::DbWeight::get().reads_writes(4, 1)
-			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(4, 5);
-
-		let offenders: Vec<OffenceDetails<<Test as frame_system::Config>::AccountId, pallet_session::historical::IdentificationTuple<Test>>>
-			= (1..10).map(|i|
-				OffenceDetails {
-					offender: (i, Staking::eras_stakers(active_era(), i)),
-					reporters: vec![],
-				}
-			).collect();
-		assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), n_offence_unapplied_weight);
+		let n_offence_unapplied_weight = <Test as frame_system::Config>::DbWeight::get()
+			.reads_writes(4, 1) +
+			<Test as frame_system::Config>::DbWeight::get().reads_writes(4, 5);
+
+		let offenders: Vec<
+			OffenceDetails<
+				<Test as frame_system::Config>::AccountId,
+				pallet_session::historical::IdentificationTuple<Test>,
+			>,
+		> = (1..10)
+			.map(|i| OffenceDetails {
+				offender: (i, Staking::eras_stakers(active_era(), i)),
+				reporters: vec![],
+			})
+			.collect();
+		assert_eq!(
+			Staking::on_offence(
+				&offenders,
+				&[Perbill::from_percent(50)],
+				0,
+				DisableStrategy::WhenSlashed
+			),
+			n_offence_unapplied_weight
+		);
 		// On Offence with one offender, Applied
-		let one_offender = [
-			OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), 11)),
-				reporters: vec![1],
-			},
-		];
+		let one_offender = [OffenceDetails {
+			offender: (11, Staking::eras_stakers(active_era(), 11)),
+			reporters: vec![1],
+		}];

 		let n = 1; // Number of offenders
 		let rw = 3 + 3 * n; // rw reads and writes
-		let one_offence_unapplied_weight = <Test as frame_system::Config>::DbWeight::get().reads_writes(4, 1)
-			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(rw, rw)
+		let one_offence_unapplied_weight =
+			<Test as frame_system::Config>::DbWeight::get().reads_writes(4, 1)
+			+
+			<Test as frame_system::Config>::DbWeight::get().reads_writes(rw, rw)
 			// One `slash_cost`
 			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(6, 5)
 			// `slash_cost` * nominators (1)
 			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(6, 5)
 			// `reward_cost` * reporters (1)
-			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(2, 2);
+			+ <Test as frame_system::Config>::DbWeight::get().reads_writes(2, 2)
+			;

-		assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), one_offence_unapplied_weight);
+		assert_eq!(
+			Staking::on_offence(
+				&one_offender,
+				&[Perbill::from_percent(50)],
+				0,
+				DisableStrategy::WhenSlashed
+			),
+			one_offence_unapplied_weight
+		);
 	});
 }

@@ -4960,7 +5082,6 @@ fn proportional_ledger_slash_works() {
 		unlocking: bounded_vec![],
 		claimed_rewards: vec![],
 	};
-	assert_eq!(BondingDuration::get(), 3);

 	// When we slash a ledger with no unlocking chunks
@@ -4997,7 +5118,7 @@ fn proportional_ledger_slash_works() {
 	ledger.total = 4 * 100;
 	ledger.active = 0;
 	// When the first 2 chunks don't overlap with the affected range of unlock eras.
-	assert_eq!(ledger.slash(140, 0, 2), 140);
+	assert_eq!(ledger.slash(140, 0, 3), 140);
 	// Then
 	assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 30), c(7, 30)]);
 	assert_eq!(ledger.total, 4 * 100 - 140);
@@ -5039,7 +5160,7 @@ fn proportional_ledger_slash_works() {
 	ledger.active = 500;
 	ledger.total = 40 + 10 + 100 + 250 + 500; // 900
 	assert_eq!(ledger.total, 900);
-	// When we have a higher min balance
+	// When we have a higher min balance
 	assert_eq!(
 		ledger.slash(
 			900 / 2,
 			25, /* min balance - any chunk slashed below this is dusted and will
			     * get swept */
 			0
 		),
-		475
+		450
 	);
-	let dust = (10 / 2) + (40 / 2);
 	assert_eq!(ledger.active, 500 / 2);
-	assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 250 / 2)]);
-	assert_eq!(ledger.total, 900 / 2 - dust);
+	// the last chunk was not slashed 50% like all the rest, because some other earlier chunks got
+	// dusted.
+	assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 150)]);
+	assert_eq!(ledger.total, 900 / 2);
 	assert_eq!(LedgerSlashPerEra::get().0, 500 / 2);
 	assert_eq!(
 		LedgerSlashPerEra::get().1,
-		BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 250 / 2)])
+		BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 150)])
 	);

 	// Given
@@ -5068,7 +5190,7 @@ fn proportional_ledger_slash_works() {
 		ledger.slash(
 			500 + 10 + 250 + 100 / 2, // active + era 6 + era 7 + era 5 / 2
 			0,
-			2 /* slash era 2+4 first, so the affected parts are era 2+4, era 3+4 and
+			3 /* slash era 6 first, so the affected parts are era 6, era 7 and
 			   * ledger.active. This will cause the affected to go to zero, and then we will
 			   * start slashing older chunks */
 		),
@@ -5091,7 +5213,7 @@ fn proportional_ledger_slash_works() {
 		ledger.slash(
 			351, // active + era 6 + era 7 + era 5 / 2 + 1
 			50,  // min balance - everything slashed below 50 will get dusted
-			2 /* slash era 2+4 first, so the affected parts are era 2+4, era 3+4 and
+			3 /* slash era 6 first, so the affected parts are era 6, era 7 and
 			   * ledger.active.
This will cause the affected to go to zero, and then we will * start slashing older chunks */ ), @@ -5108,9 +5230,8 @@ fn proportional_ledger_slash_works() { // Given let slash = u64::MAX as Balance * 2; - let value = slash - - (9 * 4) // The value of the other parts of ledger that will get slashed - + 1; + // The value of the other parts of ledger that will get slashed + let value = slash - (10 * 4); ledger.active = 10; ledger.unlocking = bounded_vec![c(4, 10), c(5, 10), c(6, 10), c(7, value)]; diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 1bdfb01bddc86..c5f584054d1f4 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_staking. @@ -86,9 +86,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (43_992_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(43_992_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -96,9 +96,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (75_827_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(75_827_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -110,20 +110,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (81_075_000 as Weight) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + Weight::from_ref_time(81_075_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (35_763_000 as Weight) + Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -139,9 +139,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn 
withdraw_unbonded_kill(_s: u32, ) -> Weight { - (66_938_000 as Weight) - .saturating_add(T::DbWeight::get().reads(13 as Weight)) - .saturating_add(T::DbWeight::get().writes(11 as Weight)) + Weight::from_ref_time(66_938_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(13 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(11 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -155,19 +155,19 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (52_943_000 as Weight) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(52_943_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_264_000 as Weight) + Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -181,12 +181,12 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (56_596_000 as Weight) + Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -196,50 +196,50 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (51_117_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(51_117_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - (11_223_000 
as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(11_223_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (19_826_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(19_826_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (3_789_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_789_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (3_793_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_793_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (3_802_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_802_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (3_762_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_762_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (4_318_000 as Weight) + Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -255,20 +255,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (65_265_000 as Weight) + Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (903_312_000 as Weight) + Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - .saturating_add((4_968_000 as 
Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -281,13 +281,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (87_569_000 as Weight) + Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -301,13 +301,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (98_839_000 as Weight) + Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -316,11 +316,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (74_865_000 as Weight) + Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(9 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(9 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking 
CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -332,12 +332,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -353,12 +353,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (70_933_000 as Weight) + Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -379,16 +379,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 45_000 - .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(208 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(208 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: BagsList 
CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -399,25 +399,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(202 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(202 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -426,8 +426,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - (7_041_000 as Weight) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(7_041_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -436,8 +436,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - (6_495_000 as Weight) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(6_495_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -450,16 +450,16 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight 
{ - (62_014_000 as Weight) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(62_014_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - (12_814_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_814_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -472,9 +472,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (43_992_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(43_992_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -482,9 +482,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (75_827_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(75_827_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -496,20 +496,20 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (81_075_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + Weight::from_ref_time(81_075_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (35_763_000 as Weight) + Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -525,9 +525,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (66_938_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(13 as Weight)) - .saturating_add(RocksDbWeight::get().writes(11 as Weight)) + Weight::from_ref_time(66_938_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(13 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(11 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -541,19 +541,19 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (52_943_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(52_943_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_264_000 as Weight) + Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -567,12 +567,12 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (56_596_000 as Weight) + Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -582,50 +582,50 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (51_117_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(51_117_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - (11_223_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(11_223_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as 
RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (19_826_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(19_826_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (3_789_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_789_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (3_793_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_793_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (3_802_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_802_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (3_762_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_762_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (4_318_000 as Weight) + Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -641,20 +641,20 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (65_265_000 as Weight) + Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (903_312_000 as Weight) + Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - .saturating_add((4_968_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(4_968_000 as 
RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -667,13 +667,13 @@ impl WeightInfo for () { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (87_569_000 as Weight) + Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(10 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -687,13 +687,13 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (98_839_000 as Weight) + Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -702,11 +702,11 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (74_865_000 as Weight) + Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(9 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(9 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -718,12 +718,12 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 
w:1) fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -739,12 +739,12 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (70_933_000 as Weight) + Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -765,16 +765,16 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 45_000 - .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(208 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(208 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -785,25 +785,25 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) fn 
get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(202 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(202 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -812,8 +812,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - (7_041_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(7_041_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -822,8 +822,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - (6_495_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(6_495_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -836,15 +836,15 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - (62_014_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(62_014_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - (12_814_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_814_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } }
diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 5a93701141da9..f8a3b1ba3aab4 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml
@@ -30,7 +30,7 @@ sp-std = { default-features = false, path = "../../primitives/std" } substrate-state-trie-migration-rpc = { optional = true, path = "../../utils/frame/rpc/state-trie-migration-rpc" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" tokio = { version = "1.17.0", features = ["macros"] } pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" }
diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 9ac128a0e3108..93b5bd3468f6b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs
@@ -56,6 +56,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub use pallet::*; +pub mod weights; const LOG_TARGET: &str = "runtime::state-trie-migration";
@@ -71,6 +72,9 @@ macro_rules! log { #[frame_support::pallet] pub mod pallet { + + pub use crate::weights::WeightInfo; + use frame_support::{ dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, ensure,
@@ -90,41 +94,6 @@ pub mod pallet { pub(crate) type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as Config>::AccountId>>::Balance; - /// The weight information of this pallet. - pub trait WeightInfo { - fn process_top_key(x: u32) -> Weight; - fn continue_migrate() -> Weight; - fn continue_migrate_wrong_witness() -> Weight; - fn migrate_custom_top_fail() -> Weight; - fn migrate_custom_top_success() -> Weight; - fn migrate_custom_child_fail() -> Weight; - fn migrate_custom_child_success() -> Weight; - } - - impl WeightInfo for () { - fn process_top_key(_: u32) -> Weight { - 1000000 - } - fn continue_migrate() -> Weight { - 1000000 - } - fn continue_migrate_wrong_witness() -> Weight { - 1000000 - } - fn migrate_custom_top_fail() -> Weight { - 1000000 - } - fn migrate_custom_top_success() -> Weight { - 1000000 - } - fn migrate_custom_child_fail() -> Weight { - 1000000 - } - fn migrate_custom_child_success() -> Weight { - 1000000 - } - } - /// The progress of either the top or child keys. #[derive( CloneNoBound,
@@ -266,7 +235,10 @@ pub mod pallet { /// reading a key, we simply cannot know how many bytes it is. In other words, this should /// not be used in any environment where resources are strictly bounded (e.g. a parachain), /// but it is acceptable otherwise (relay chain, offchain workers). - pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) -> DispatchResult { + pub fn migrate_until_exhaustion( + &mut self, + limits: MigrationLimits, + ) -> Result<(), Error<T>> { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); if limits.item.is_zero() || limits.size.is_zero() {
@@ -293,7 +265,7 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// /// This function is *the* core of this entire pallet. - fn migrate_tick(&mut self) -> DispatchResult { + fn migrate_tick(&mut self) -> Result<(), Error<T>> { match (&self.progress_top, &self.progress_child) { (Progress::ToStart, _) => self.migrate_top(), (Progress::LastKey(_), Progress::LastKey(_)) => {
@@ -332,7 +304,7 @@ pub mod pallet { /// Migrate the current child key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_child(&mut self) -> DispatchResult { + fn migrate_child(&mut self) -> Result<(), Error<T>> { use sp_io::default_child_storage as child_io; let (maybe_current_child, child_root) = match (&self.progress_child, &self.progress_top) {
@@ -381,7 +353,7 @@ pub mod pallet { /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_top(&mut self) -> DispatchResult { + fn migrate_top(&mut self) -> Result<(), Error<T>> { let maybe_current_top = match &self.progress_top { Progress::LastKey(last_top) => { let maybe_top: Option<Vec<u8>> =
@@ -462,7 +434,7 @@ pub mod pallet { /// The auto migration task finished. AutoMigrationFinished, /// Migration got halted due to an error or misconfiguration. - Halted, + Halted { error: Error<T> }, } /// The outer Pallet struct.
@@ -547,8 +519,9 @@ pub mod pallet { pub type SignedMigrationMaxLimits<T> = StorageValue<_, MigrationLimits, OptionQuery>; #[pallet::error] + #[derive(Clone, PartialEq)] pub enum Error<T> { - /// max signed limits not respected. + /// Max signed limits not respected. MaxSignedLimits, /// A key was longer than the configured maximum. ///
@@ -560,12 +533,12 @@ pub mod pallet { KeyTooLong, /// submitter does not have enough funds. NotEnoughFunds, - /// bad witness data provided. + /// Bad witness data provided. BadWitness, - /// upper bound of size is exceeded, - SizeUpperBoundExceeded, /// Signed migration is not allowed because the maximum limit is not set yet. SignedMigrationNotAllowed, + /// Bad child root provided. + BadChildRoot, } #[pallet::call]
@@ -648,7 +621,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::<T>::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Err(Error::<T>::SizeUpperBoundExceeded.into()) + return Ok(().into()) } Self::deposit_event(Event::<T>::Migrated {
@@ -665,13 +638,10 @@ pub mod pallet { MigrationProcess::<T>::put(task); let post_info = PostDispatchInfo { actual_weight, pays_fee: Pays::No }; - match migration { - Ok(_) => Ok(post_info), - Err(error) => { - Self::halt(&error); - Err(DispatchErrorWithPostInfo { post_info, error }) - }, + if let Err(error) = migration { + Self::halt(error); } + Ok(post_info) } /// Migrate the list of top keys by iterating each of them one by one.
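The hunks above rework failure handling: the internal migration routines now return the pallet's typed `Error<T>`, and a failing `continue_migrate` slashes the submitter's deposit but still returns `Ok`, because under FRAME's (now default) transactional dispatch an `Err` would revert the slash together with the rest of the extrinsic's effects. A minimal, self-contained sketch of this "record the halt reason, return Ok" shape, using plain Rust stand-ins rather than the pallet's real types (all names below are illustrative):

    // Minimal model of the new control flow: the halt reason is carried as
    // data on the event, and dispatch reports success so earlier state
    // changes (the deposit slash) stick.
    #[derive(Debug, Clone, PartialEq)]
    enum MigrationError {
        KeyTooLong,
        BadWitness,
    }

    #[derive(Debug, PartialEq)]
    enum Event {
        Halted { error: MigrationError },
    }

    fn settle(migration: Result<(), MigrationError>, events: &mut Vec<Event>) -> Result<(), ()> {
        if let Err(error) = migration {
            // Stand-in for `Self::halt(error)`: stop auto-migration, emit the reason.
            events.push(Event::Halted { error });
        }
        // Always Ok: an Err here would undo the slash applied before this point.
        Ok(())
    }

    fn main() {
        let mut events = Vec::new();
        assert!(settle(Err(MigrationError::KeyTooLong), &mut events).is_ok());
        assert_eq!(events, vec![Event::Halted { error: MigrationError::KeyTooLong }]);
    }
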
@@ -710,7 +680,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::<T>::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - Err("wrong witness data".into()) + Ok(().into()) } else { Self::deposit_event(Event::<T>::Migrated { top: keys.len() as u32,
@@ -772,12 +742,9 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); Self::deposit_event(Event::<T>::Slashed { who, amount: deposit }); - Err(DispatchErrorWithPostInfo { - error: "bad witness".into(), - post_info: PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), - pays_fee: Pays::Yes, - }, + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), + pays_fee: Pays::Yes, }) } else { Self::deposit_event(Event::<T>::Migrated {
@@ -837,7 +804,7 @@ pub mod pallet { if let Some(limits) = Self::auto_limits() { let mut task = Self::migration_process(); if let Err(e) = task.migrate_until_exhaustion(limits) { - Self::halt(&e); + Self::halt(e); } let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size);
@@ -872,18 +839,19 @@ pub mod pallet { impl<T: Config> Pallet<T> { /// The real weight of a migration of the given number of `items` with total `size`. fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { - let items = items as Weight; - items - .saturating_mul(<T as frame_system::Config>::DbWeight::get().reads_writes(1, 1)) + let items = items as u64; + <T as frame_system::Config>::DbWeight::get() + .reads_writes(1, 1) + .scalar_saturating_mul(items) // we assume that the read/write per-byte weight is the same for child and top tree. .saturating_add(T::WeightInfo::process_top_key(size)) } /// Put a stop to all ongoing migrations and log an error. - fn halt<E: sp_std::fmt::Debug>(msg: &E) { - log!(error, "migration halted due to: {:?}", msg); + fn halt(error: Error<T>) { + log!(error, "migration halted due to: {:?}", error); AutoLimits::<T>::kill(); - Self::deposit_event(Event::<T>::Halted); + Self::deposit_event(Event::<T>::Halted { error }); } /// Convert a child root key, aka. "Child-bearing top key" into the proper format.
@@ -902,7 +870,7 @@ pub mod pallet { fn transform_child_key_or_halt(root: &Vec<u8>) -> &[u8] { let key = Self::transform_child_key(root); if key.is_none() { - Self::halt("bad child root key"); + Self::halt(Error::<T>::BadChildRoot); } key.unwrap_or_default() }
@@ -992,8 +960,16 @@ mod benchmarks { frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_err() - ) + ).is_ok() + ); + + frame_system::Pallet::<T>::assert_last_event( + <T as Config>::Event::from(crate::Event::Slashed { + who: caller.clone(), + amount: T::SignedDepositBase::get() + .saturating_add(T::SignedDepositPerItem::get().saturating_mul(1u32.into())), + }).into(), + ); } verify { assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
@@ -1036,7 +1012,7 @@ mod benchmarks { StateTrieMigration::<T>::childify("top"), vec![b"foo".to_vec()], 1, - ).is_err() + ).is_ok() ) } verify {
@@ -1072,6 +1048,7 @@ mod mock { use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, Hooks}, + weights::Weight, }; use frame_system::{EnsureRoot, EnsureSigned}; use sp_core::{
@@ -1148,6 +1125,33 @@ mod mock { type WeightInfo = (); } + /// Test-only weights for state migration. + pub struct StateMigrationTestWeight; + + impl WeightInfo for StateMigrationTestWeight { + fn process_top_key(_: u32) -> Weight { + Weight::from_ref_time(1000000) + } + fn continue_migrate() -> Weight { + Weight::from_ref_time(1000000) + } + fn continue_migrate_wrong_witness() -> Weight { + Weight::from_ref_time(1000000) + } + fn migrate_custom_top_fail() -> Weight { + Weight::from_ref_time(1000000) + } + fn migrate_custom_top_success() -> Weight { + Weight::from_ref_time(1000000) + } + fn migrate_custom_child_fail() -> Weight { + Weight::from_ref_time(1000000) + } + fn migrate_custom_child_success() -> Weight { + Weight::from_ref_time(1000000) + } + } + impl pallet_state_trie_migration::Config for Test { type Event = Event; type ControlOrigin = EnsureRoot<u64>;
@@ -1156,7 +1160,7 @@ mod mock { type SignedDepositPerItem = SignedDepositPerItem; type SignedDepositBase = SignedDepositBase; type SignedFilter = EnsureSigned<Self::AccountId>; - type WeightInfo = (); + type WeightInfo = StateMigrationTestWeight; } pub fn new_test_ext(
@@ -1240,9 +1244,9 @@ mod mock { (custom_storage, version).into() } - pub(crate) fn run_to_block(n: u32) -> (H256, u64) { + pub(crate) fn run_to_block(n: u32) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = 0; + let mut weight_sum = Weight::new(); log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1);
@@ -1250,7 +1254,7 @@ mod mock { weight_sum += StateTrieMigration::on_initialize(System::block_number()); - root = System::finalize().state_root().clone(); + root = *System::finalize().state_root(); System::on_finalize(System::block_number()); } (root, weight_sum)
@@ -1288,18 +1292,16 @@ mod test { SignedMigrationMaxLimits::<Test>::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::<Test>::get() - ), - Error::<Test>::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::<Test>::get() + ),); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::<Test>::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::<Test>::get().is_none());
@@ -1325,18 +1327,16 @@ mod test { SignedMigrationMaxLimits::<Test>::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::<Test>::get() - ), - Error::<Test>::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::<Test>::get() + )); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::<Test>::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::<Test>::get().is_none());
@@ -1487,7 +1487,7 @@ mod test { ..Default::default() } ), - Error::<Test>::BadWitness + Error::<Test>::BadWitness, ); // migrate all keys in a series of submissions
@@ -1550,14 +1550,11 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. - frame_support::assert_err!( - StateTrieMigration::migrate_custom_top( - Origin::signed(1), - vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - correct_witness - 1, - ), - "wrong witness data" - ); + frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + correct_witness - 1, + ),); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0);
@@ -1587,13 +1584,12 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. - assert!(StateTrieMigration::migrate_custom_child( + frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( Origin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness - ) - .is_err()); + )); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0);
@@ -1611,7 +1607,10 @@ pub(crate) mod remote_tests { use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; use frame_benchmarking::Zero; - use frame_support::traits::{Get, Hooks}; + use frame_support::{ + traits::{Get, Hooks}, + weights::Weight, + }; use frame_system::Pallet as System; use remote_externalities::Mode; use sp_core::H256;
@@ -1621,9 +1620,9 @@ pub(crate) mod remote_tests { #[allow(dead_code)] fn run_to_block<Runtime: crate::Config<Hash = H256>>( n: <Runtime as frame_system::Config>::BlockNumber, - ) -> (H256, u64) { + ) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = 0; + let mut weight_sum = Weight::new(); while System::<Runtime>::block_number() < n { System::<Runtime>::set_block_number(System::<Runtime>::block_number() + One::one()); System::<Runtime>::on_initialize(System::<Runtime>::block_number());
diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index f2566f949c058..6fccc0b68f3d7 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs
@@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_state_trie_migration.
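Both `run_to_block` helpers above now return `Weight` and seed their accumulator with `Weight::new()` instead of `0`, because `Weight` is no longer a plain `u64` alias but a struct. A standalone sketch of that accumulation pattern, using a reduced stand-in for `frame_support`'s type (only the one field and the helpers used here are modeled; the real type also carries saturating helpers and later gained a proof-size dimension):

    // Reduced stand-in for frame_support's Weight struct.
    #[derive(Clone, Copy, Debug, Default, PartialEq)]
    struct Weight {
        ref_time: u64,
    }

    impl Weight {
        const fn new() -> Self {
            Self { ref_time: 0 }
        }
        const fn from_ref_time(ref_time: u64) -> Self {
            Self { ref_time }
        }
    }

    impl core::ops::AddAssign for Weight {
        fn add_assign(&mut self, rhs: Self) {
            self.ref_time += rhs.ref_time;
        }
    }

    fn main() {
        // Mirrors the mock's loop: sum the weight returned by each block's hooks.
        let mut weight_sum = Weight::new();
        for _ in 0..3 {
            weight_sum += Weight::from_ref_time(1_000_000); // e.g. on_initialize(..)
        }
        assert_eq!(weight_sum, Weight::from_ref_time(3_000_000));
    }
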
@@ -59,40 +59,40 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - (19_019_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_019_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - (1_874_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_874_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } fn migrate_custom_top_success() -> Weight { - (16_381_000 as Weight) + Weight::from_ref_time(16_381_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - (25_966_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_966_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn migrate_custom_child_success() -> Weight { - (16_712_000 as Weight) + Weight::from_ref_time(16_712_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - (29_885_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_885_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } }
@@ -101,39 +101,39 @@ impl WeightInfo for () { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - (19_019_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_019_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - (1_874_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_874_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } fn migrate_custom_top_success() -> Weight { - (16_381_000 as Weight) + Weight::from_ref_time(16_381_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - (25_966_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_966_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn migrate_custom_child_success() -> Weight { - (16_712_000 as Weight) + Weight::from_ref_time(16_712_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - (29_885_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_885_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } }
diff --git a/frame/sudo/README.md b/frame/sudo/README.md index e8f688091e326..7342832d2d7a7 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md
@@ -72,6 +72,6 @@ You need to set an initial superuser account as the sudo `key`. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html -[`Origin`]: https://docs.substrate.io/v3/runtime/origins +[`Origin`]: https://docs.substrate.io/main-docs/build/origins/ License: Apache-2.0
diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index d9e72b37f2970..bde69f11106dc 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs
@@ -88,7 +88,7 @@ //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Origin`]: https://docs.substrate.io/v3/runtime/origins +//! [`Origin`]: https://docs.substrate.io/main-docs/build/origins/ #![cfg_attr(not(feature = "std"), no_std)]
@@ -104,6 +104,8 @@ mod tests; pub use pallet::*; +type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *};
@@ -137,7 +139,7 @@ pub mod pallet { /// # #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); - (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) + (dispatch_info.weight, dispatch_info.class) })] pub fn sudo( origin: OriginFor<T>,
@@ -192,7 +194,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn set_key( origin: OriginFor<T>, - new: <T::Lookup as StaticLookup>::Source, + new: AccountIdLookupOf<T>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?;
@@ -220,7 +222,6 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( dispatch_info.weight - .saturating_add(10_000) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class,
@@ -228,7 +229,7 @@ pub mod pallet { })] pub fn sudo_as( origin: OriginFor<T>, - who: <T::Lookup as StaticLookup>::Source, + who: AccountIdLookupOf<T>, call: Box<<T as Config>::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account.
diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 2e2a4abafcd98..c895eaf830136 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs
@@ -22,6 +22,7 @@ use crate as sudo; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, Contains, GenesisBuild}, + weights::Weight, }; use frame_system::limits; use sp_core::H256;
@@ -44,7 +45,6 @@ pub mod logger { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet<T>(PhantomData<T>); #[pallet::call]
@@ -57,7 +57,7 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is `Root`. ensure_root(origin)?; - <I32Log<T>>::append(i); + <I32Log<T>>::try_append(i).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32 { value: i, weight }); Ok(().into()) }
@@ -70,8 +70,8 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is some signed account. let sender = ensure_signed(origin)?; - <I32Log<T>>::append(i); - <AccountLog<T>>::append(sender.clone()); + <I32Log<T>>::try_append(i).map_err(|_| "could not append")?; + <AccountLog<T>>::try_append(sender.clone()).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32AndAccount { sender, value: i, weight }); Ok(().into()) }
@@ -86,11 +86,12 @@ pub mod logger { #[pallet::storage] #[pallet::getter(fn account_log)] - pub(super) type AccountLog<T: Config> = StorageValue<_, Vec<T::AccountId>, ValueQuery>; + pub(super) type AccountLog<T: Config> = + StorageValue<_, BoundedVec<T::AccountId, ConstU32<1_000>>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn i32_log)] - pub(super) type I32Log<T> = StorageValue<_, Vec<i32>, ValueQuery>; + pub(super) type I32Log<T> = StorageValue<_, BoundedVec<i32, ConstU32<1_000>>, ValueQuery>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
@@ -109,7 +110,7 @@ frame_support::construct_runtime!( ); parameter_types! { - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); } pub struct BlockEverything;
diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 84c8e0c5c254e..502c6476935a2 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs
@@ -18,7 +18,7 @@ //! Tests for the module. use super::*; -use frame_support::{assert_noop, assert_ok}; +use frame_support::{assert_noop, assert_ok, weights::Weight}; use mock::{ new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, Test,
@@ -39,12 +39,18 @@ fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::<Test>::RequireSudo); }); }
@@ -56,7 +62,8 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); })
@@ -66,24 +73,36 @@ fn sudo_unchecked_weight_basics() { new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as origin. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); - assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); + assert_ok!(Sudo::sudo_unchecked_weight( + Origin::signed(1), + call, + Weight::from_ref_time(1_000) + )); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_noop!( - Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), + Sudo::sudo_unchecked_weight(Origin::signed(2), call, Weight::from_ref_time(1_000)), Error::<Test>::RequireSudo, ); // `I32Log` is unchanged after unsuccessful call. assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); - let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: 1_000 }; + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + let sudo_unchecked_weight_call = + SudoCall::sudo_unchecked_weight { call, weight: Weight::from_ref_time(1_000) }; let info = sudo_unchecked_weight_call.get_dispatch_info(); - assert_eq!(info.weight, 1_000); + assert_eq!(info.weight, Weight::from_ref_time(1_000)); }); }
@@ -94,8 +113,13 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); - assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + assert_ok!(Sudo::sudo_unchecked_weight( + Origin::signed(1), + call, + Weight::from_ref_time(1_000) + )); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) }
@@ -134,17 +158,22 @@ fn sudo_as_basics() { new_test_ext(1).execute_with(|| { // A privileged function will not work when passed to `sudo_as`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::<Test>::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); // The correct user makes the call within `sudo_as`.
@@ -159,7 +188,8 @@ fn sudo_as_emits_events_correctly() { System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) })); });
diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index ca26d3a5e32f2..6f0831b751b45 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml
@@ -17,6 +17,7 @@ serde = { version = "1.0.136", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-metadata = { version = "15.0.0", default-features = false, features = ["v14"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" }
@@ -38,7 +39,7 @@ sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/hashing/proc-macro" } k256 = { version = "0.10.4", default-features = false, features = ["ecdsa"] } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" }
@@ -49,6 +50,7 @@ default = ["std"] std = [ "once_cell", "serde", + "sp-api/std", "sp-io/std", "codec/std", "scale-info/std",
@@ -71,3 +73,7 @@ no-metadata-docs = ["frame-support-procedural/no-metadata-docs"] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. full-metadata-docs = ["scale-info/docs"] +# Generate impl-trait for tuples with the given number of elements. Will be needed as the number of +# pallets in a runtime grows. Does increase the compile time!
+tuples-96 = [] +tuples-128 = [] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 7ddec39cad9fb..06b8056aff982 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -16,6 +16,8 @@ proc-macro = true [dependencies] Inflector = "0.11.4" +cfg-expr = "0.10.3" +itertools = "0.10.3" proc-macro2 = "1.0.37" quote = "1.0.10" syn = { version = "1.0.98", features = ["full"] } diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index c8c5d5ff0ee43..028e68f0198d1 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_dispatch( @@ -30,6 +31,7 @@ pub fn expand_outer_dispatch( let mut variant_patterns = Vec::new(); let mut query_call_part_macros = Vec::new(); let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let system_path = &system_pallet.path; let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); @@ -38,12 +40,24 @@ pub fn expand_outer_dispatch( let name = &pallet_declaration.name; let path = &pallet_declaration.path; let index = pallet_declaration.index; + let attr = + pallet_declaration.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); - variant_defs.extend( - quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),), - ); + variant_defs.extend(quote! { + #attr + #[codec(index = #index)] + #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ), + }); variant_patterns.push(quote!(Call::#name(call))); pallet_names.push(name); + pallet_attrs.push(attr); query_call_part_macros.push(quote! 
{ #path::__substrate_call_check::is_call_part_defined!(#name); }); @@ -69,6 +83,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::Callable; use core::mem::size_of; &[#( + #pallet_attrs ( stringify!(#pallet_names), size_of::< <#pallet_names as Callable<#runtime>>::Call >(), @@ -101,7 +116,10 @@ pub fn expand_outer_dispatch( impl #scrate::dispatch::GetDispatchInfo for Call { fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { match self { - #( #variant_patterns => call.get_dispatch_info(), )* + #( + #pallet_attrs + #variant_patterns => call.get_dispatch_info(), + )* } } } @@ -110,6 +128,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::GetCallName; match self { #( + #pallet_attrs #variant_patterns => { let function_name = call.get_call_name(); let pallet_name = stringify!(#pallet_names); @@ -121,6 +140,7 @@ pub fn expand_outer_dispatch( fn get_module_names() -> &'static [&'static str] { &[#( + #pallet_attrs stringify!(#pallet_names), )*] } @@ -129,6 +149,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::{Callable, GetCallName}; match module { #( + #pallet_attrs stringify!(#pallet_names) => <<#pallet_names as Callable<#runtime>>::Call as GetCallName>::get_call_names(), @@ -157,27 +178,16 @@ pub fn expand_outer_dispatch( fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { match self { #( + #pallet_attrs #variant_patterns => #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, origin), )* } } } - impl #scrate::traits::DispatchableWithStorageLayer for Call { - type Origin = Origin; - fn dispatch_with_storage_layer(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { - #scrate::storage::with_storage_layer(|| { - #scrate::dispatch::Dispatchable::dispatch(self, origin) - }) - } - fn dispatch_bypass_filter_with_storage_layer(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { - #scrate::storage::with_storage_layer(|| { - #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) - }) - } - } #( + #pallet_attrs impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { #[allow(unreachable_patterns)] fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { @@ -189,6 +199,7 @@ pub fn expand_outer_dispatch( } } + #pallet_attrs impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { #variant_patterns diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index a3d70f18529c7..9b731a5825a3c 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -19,6 +19,7 @@ use crate::construct_runtime::Pallet; use inflector::Inflector; use proc_macro2::TokenStream; use quote::{format_ident, quote, ToTokens}; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_config( @@ -40,11 +41,19 @@ pub fn expand_outer_config( let field_name = &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); + let attr = &decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! 
{ + #acc + #attr + } + }); - types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); - fields.extend(quote!(pub #field_name: #config,)); + types.extend(expand_config_types(attr, runtime, decl, &config, part_is_generic)); + fields.extend(quote!(#attr pub #field_name: #config,)); build_storage_calls - .extend(expand_config_build_storage_call(scrate, runtime, decl, field_name)); + .extend(expand_config_build_storage_call(scrate, attr, runtime, decl, field_name)); query_genesis_config_part_macros.push(quote! { #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); #[cfg(feature = "std")] @@ -88,6 +97,7 @@ pub fn expand_outer_config( } fn expand_config_types( + attr: &TokenStream, runtime: &Ident, decl: &Pallet, config: &Ident, @@ -97,14 +107,17 @@ fn expand_config_types( match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; }, (None, true) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime>; }, (_, false) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig; }, @@ -113,6 +126,7 @@ fn expand_config_types( fn expand_config_build_storage_call( scrate: &TokenStream, + attr: &TokenStream, runtime: &Ident, decl: &Pallet, field_name: &Ident, @@ -125,6 +139,7 @@ fn expand_config_build_storage_call( }; quote! { + #attr #scrate::sp_runtime::BuildModuleGenesisStorage:: <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; } diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index b242f9641562c..f145327d37af5 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_event( @@ -97,19 +98,35 @@ fn expand_event_variant( let path = &pallet.path; let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); match instance { - Some(inst) if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) + Some(inst) if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#runtime, #path::#inst>), }, - Some(inst) => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) + Some(inst) => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#path::#inst>), }, - None if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) + None if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#runtime>), }, - None => { - quote!(#[codec(index = #index)] #variant_name(#path::Event),) + None => quote! 
{ + #attr + #[codec(index = #index)] + #variant_name(#path::Event), }, } } @@ -120,13 +137,23 @@ fn expand_event_conversion( pallet_event: &TokenStream, ) -> TokenStream { let variant_name = &pallet.name; + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr impl From<#pallet_event> for Event { fn from(x: #pallet_event) -> Self { Event::#variant_name(x) } } + #attr impl TryInto<#pallet_event> for Event { type Error = (); diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 0f0d538643240..599b34ba87241 100644 --- a/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_outer_inherent( @@ -28,14 +29,24 @@ pub fn expand_outer_inherent( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let mut query_inherent_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("Inherent") { let name = &pallet_decl.name; let path = &pallet_decl.path; + let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); pallet_names.push(name); + pallet_attrs.push(attr); query_inherent_part_macros.push(quote! { #path::__substrate_inherent_check::is_inherent_part_defined!(#name); }); @@ -60,6 +71,7 @@ pub fn expand_outer_inherent( let mut inherents = Vec::new(); #( + #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { let inherent = <#unchecked_extrinsic as #scrate::inherent::Extrinsic>::new( inherent.into(), @@ -90,22 +102,25 @@ pub fn expand_outer_inherent( let mut is_inherent = false; - #({ - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(call) { - is_inherent = true; - if let Err(e) = #pallet_names::check_inherent(call, self) { - result.put_error( - #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result; + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(call) { + is_inherent = true; + if let Err(e) = #pallet_names::check_inherent(call, self) { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } } } } } - })* + )* // Inherents are before any other extrinsics. // No module marked it as inherent thus it is not. 
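All of the expand modules in this patch (call, config, event, inherent, and the metadata, origin, and unsigned modules below) repeat one move: fold the pallet's parsed `cfg_pattern` back into `#[cfg(...)]` attribute tokens and attach them to every generated item, so that a feature-gated pallet drops out of the runtime wholesale when its feature is off. A runnable sketch of that fold, assuming the same `cfg-expr`, `proc-macro2`, and `quote` dependencies this diff adds to `frame-support-procedural` (the feature name is invented for illustration):

    use std::str::FromStr;

    use cfg_expr::Expression;
    use proc_macro2::TokenStream;
    use quote::quote;

    fn main() {
        // Two cfg patterns on one pallet declaration; stacked attributes AND together.
        let cfg_patterns = vec![
            Expression::parse(r#"feature = "experimental-pallet""#).unwrap(),
            Expression::parse("test").unwrap(),
        ];

        // Same fold as in the expand modules: rebuild `#[cfg(...)]` from the
        // original pattern text and accumulate the attributes into one stream.
        let attr = cfg_patterns.iter().fold(TokenStream::new(), |acc, pattern| {
            let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original()))
                .expect("was successfully parsed before; qed");
            quote! {
                #acc
                #attr
            }
        });

        // Prints the two attributes as token streams, roughly:
        // #[cfg(feature = "experimental-pallet")] #[cfg(test)]
        println!("{attr}");
    }
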
@@ -115,6 +130,7 @@ pub fn expand_outer_inherent( } #( + #pallet_attrs match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { @@ -177,14 +193,17 @@ pub fn expand_outer_inherent( false } else { let mut is_inherent = false; - #({ - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(&call) { + is_inherent = true; + } } } - })* + )* is_inherent }; diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 6e2dd5fc002c6..ec90a0d30f98b 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_runtime_metadata( @@ -47,8 +48,17 @@ pub fn expand_runtime_metadata( let event = expand_pallet_metadata_events(&filtered_names, runtime, scrate, decl); let constants = expand_pallet_metadata_constants(runtime, decl); let errors = expand_pallet_metadata_errors(runtime, decl); + let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr #scrate::metadata::PalletMetadata { name: stringify!(#name), index: #index, diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index f77177e2be01d..288cec3c1e175 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_origin( @@ -314,19 +315,35 @@ fn expand_origin_caller_variant( let part_is_generic = !generics.params.is_empty(); let variant_name = &pallet.name; let path = &pallet.path; + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); match instance { - Some(inst) if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) + Some(inst) if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#runtime, #path::#inst>), }, - Some(inst) => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) + Some(inst) => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#path::#inst>), }, - None if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) + None if part_is_generic => quote! 
{ + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#runtime>), }, - None => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin),) + None => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin), }, } } @@ -350,14 +367,24 @@ fn expand_origin_pallet_conversions( }; let doc_string = get_intra_doc_string(" Convert to runtime origin using", &path.module_name()); + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { OriginCaller::#variant_name(x) } } + #attr impl From<#pallet_origin> for Origin { #[doc = #doc_string] fn from(x: #pallet_origin) -> Self { @@ -366,6 +393,7 @@ fn expand_origin_pallet_conversions( } } + #attr impl From for #scrate::sp_std::result::Result<#pallet_origin, Origin> { /// NOTE: converting to pallet origin loses the origin filter information. fn from(val: Origin) -> Self { @@ -377,6 +405,7 @@ fn expand_origin_pallet_conversions( } } + #attr impl TryFrom for #pallet_origin { type Error = OriginCaller; fn try_from( diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs index c030676802093..310516793472f 100644 --- a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_validate_unsigned( @@ -26,14 +27,24 @@ pub fn expand_outer_validate_unsigned( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let mut query_validate_unsigned_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("ValidateUnsigned") { let name = &pallet_decl.name; let path = &pallet_decl.path; + let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); pallet_names.push(name); + pallet_attrs.push(attr); query_validate_unsigned_part_macros.push(quote! { #path::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined!(#name); }); @@ -49,7 +60,10 @@ pub fn expand_outer_validate_unsigned( fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { #[allow(unreachable_patterns)] match call { - #( Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* + #( + #pallet_attrs + Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), + )* // pre-dispatch should not stop inherent extrinsics, validation should prevent // including arbitrary (non-inherent) extrinsics to blocks. 
_ => Ok(()), @@ -63,7 +77,10 @@ pub fn expand_outer_validate_unsigned( ) -> #scrate::unsigned::TransactionValidity { #[allow(unreachable_patterns)] match call { - #( Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* + #( + #pallet_attrs + Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), + )* _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), } } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 7b4156a94db58..cfd582d0e4472 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -144,9 +144,11 @@ mod expand; mod parse; +use cfg_expr::Predicate; use frame_support_procedural_tools::{ generate_crate_access, generate_crate_access_2018, generate_hidden_includes, }; +use itertools::Itertools; use parse::{ ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration, WhereSection, @@ -154,6 +156,10 @@ use parse::{ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; +use std::{ + collections::{HashMap, HashSet}, + str::FromStr, +}; use syn::{Ident, Result}; /// The fixed name of the system pallet. @@ -223,6 +229,28 @@ fn construct_runtime_final_expansion( Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) })?; + if !system_pallet.cfg_pattern.is_empty() { + return Err(syn::Error::new( + system_pallet.name.span(), + "`System` pallet declaration is feature gated, please remove any `#[cfg]` attributes", + )) + } + + let features = pallets + .iter() + .filter_map(|decl| { + (!decl.cfg_pattern.is_empty()).then(|| { + decl.cfg_pattern.iter().flat_map(|attr| { + attr.predicates().filter_map(|pred| match pred { + Predicate::Feature(feat) => Some(feat), + Predicate::Test => Some("test"), + _ => None, + }) + }) + }) + }) + .flatten() + .collect::>(); let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(hidden_crate_name, "frame-support"); @@ -231,7 +259,7 @@ fn construct_runtime_final_expansion( let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?; let outer_origin = expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?; - let all_pallets = decl_all_pallets(&name, pallets.iter()); + let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); @@ -293,61 +321,140 @@ fn construct_runtime_final_expansion( fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator, + features: &HashSet<&str>, ) -> TokenStream2 { let mut types = TokenStream2::new(); - let mut names = Vec::new(); + let mut names_by_feature = features + .iter() + .powerset() + .map(|feat| (feat, Vec::new())) + .collect::>(); for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); + let mut attrs = Vec::new(); + for cfg in &pallet_declaration.cfg_pattern { + let feat = format!("#[cfg({})]\n", cfg.original()); + attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); + } let type_decl = quote!( + #(#attrs)* pub type 
#type_name = #pallet::Pallet <#(#generics),*>; ); types.extend(type_decl); - names.push(&pallet_declaration.name); + + if pallet_declaration.cfg_pattern.is_empty() { + for names in names_by_feature.values_mut() { + names.push(&pallet_declaration.name); + } + } else { + for (feature_set, names) in &mut names_by_feature { + // Rust tidbit: if we have multiple `#[cfg]` attributes on the same item, then the + // predicates listed in all `#[cfg]` attributes are effectively joined by `and()`, + // meaning that all of them must match in order to activate the item. + let is_feature_active = pallet_declaration.cfg_pattern.iter().all(|expr| { + expr.eval(|pred| match pred { + Predicate::Feature(f) => feature_set.contains(&f), + Predicate::Test => feature_set.contains(&&"test"), + _ => false, + }) + }); + if is_feature_active { + names.push(&pallet_declaration.name); + } + } + } } - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); + let all_pallets_without_system = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::<HashSet<_>>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + pub type AllPalletsWithoutSystem = ( #(#names,)* ); + } + }); - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - let all_pallets_with_system = names - .iter() - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); + let all_pallets_with_system = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::<HashSet<_>>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types. + pub type AllPalletsWithSystem = ( #(#names,)* ); + } + }); - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system_reversed = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); + let all_pallets_without_system_reversed = + names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::<HashSet<_>>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// Excludes the System pallet. + #[deprecated(note = "Using reverse pallet orders is deprecated. 
Use only \ + `AllPalletsWithSystem` or `AllPalletsWithoutSystem`")] + pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); + } + }); + + let all_pallets_with_system_reversed = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::<HashSet<_>>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().rev(); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + #[deprecated(note = "Using reverse pallet orders is deprecated. Use only \ + `AllPalletsWithSystem` or `AllPalletsWithoutSystem`")] + pub type AllPalletsWithSystemReversed = ( #(#names,)* ); + } + }); - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` - let all_pallets_with_system_reversed = names - .iter() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); + let system_pallet = + match names_by_feature[&Vec::new()].iter().find(|n| **n == SYSTEM_PALLET_NAME) { + Some(name) => name, + None => + return syn::Error::new( + proc_macro2::Span::call_site(), + "`System` pallet declaration is missing. \ + Please add this line: `System: frame_system,`", + ) + .into_compile_error(), + }; - let system_pallet = match names.iter().find(|n| **n == SYSTEM_PALLET_NAME) { - Some(name) => name, - None => - return syn::Error::new( - proc_macro2::Span::call_site(), - "`System` pallet declaration is missing. \ - Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", - ) - .into_compile_error(), - }; + let all_pallets_reversed_with_system_first = + names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::<HashSet<_>>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = std::iter::once(system_pallet) + .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME)); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// With the system pallet first. + #[deprecated(note = "Using reverse pallet orders is deprecated. Use only \ + `AllPalletsWithSystem` or `AllPalletsWithoutSystem`")] + pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); + } + }); quote!( #types @@ -363,26 +470,15 @@ fn decl_all_pallets<'a>( https://github.com/paritytech/substrate/pull/10043")] pub type AllPallets = AllPalletsWithSystem; - /// All pallets included in the runtime as a nested tuple of types. - pub type AllPalletsWithSystem = ( #all_pallets_with_system ); + #( #all_pallets_with_system )* - /// All pallets included in the runtime as a nested tuple of types. - /// Excludes the System pallet. - pub type AllPalletsWithoutSystem = ( #all_pallets_without_system ); + #( #all_pallets_without_system )* - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - pub type AllPalletsWithoutSystemReversed = ( #all_pallets_without_system_reversed ); + #( #all_pallets_with_system_reversed )* - /// All pallets included in the runtime as a nested tuple of types in reversed order.
- pub type AllPalletsWithSystemReversed = ( #all_pallets_with_system_reversed ); + #( #all_pallets_without_system_reversed )* - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - pub type AllPalletsReversedWithSystemFirst = ( - #system_pallet, - AllPalletsWithoutSystemReversed - ); + #( #all_pallets_reversed_with_system_first )* ) } @@ -405,6 +501,19 @@ fn decl_pallet_runtime_setup( } }) .collect::<Vec<_>>(); + let pallet_attrs = pallet_declarations + .iter() + .map(|pallet| { + pallet.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { + let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }) + }) + .collect::<Vec<_>>(); quote!( /// Provides an implementation of `PalletInfo` to provide information /// about the pallet setup in the runtime. @@ -415,6 +524,7 @@ fn decl_pallet_runtime_setup( fn index<P: 'static>() -> Option<usize> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#indices) } @@ -426,6 +536,7 @@ fn decl_pallet_runtime_setup( fn name<P: 'static>() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#name_strings) } @@ -437,6 +548,7 @@ fn decl_pallet_runtime_setup( fn module_name<P: 'static>() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#module_names) } @@ -448,6 +560,7 @@ fn decl_pallet_runtime_setup( fn crate_version<P: 'static>() -> Option<#scrate::traits::CrateVersion> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
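For readers unfamiliar with the `cfg-expr` crate that `decl_all_pallets` leans on above: it parses the inside of a `#[cfg(...)]` attribute into an `Expression` that can later be evaluated against a set of enabled predicates. A minimal self-contained sketch (the feature names are illustrative):

```rust
use cfg_expr::{Expression, Predicate};

fn main() {
    // Parse a cfg pattern, as `construct_runtime`'s parser does.
    let expr = Expression::parse(r#"all(feature = "pallet-a", not(test))"#).unwrap();

    // Evaluate it against the enabled features, mirroring the
    // `is_feature_active` check in `decl_all_pallets`.
    let enabled = ["pallet-a"];
    assert!(expr.eval(|pred| match pred {
        Predicate::Feature(f) => enabled.contains(f),
        Predicate::Test => false,
        _ => false,
    }));
}
```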
+ Debug, H: HeaderT, - C: UsageProvider + HeaderBackend, + BA: ClientBackend, + C: UsageProvider + HeaderBackend + StorageProvider, { // Store the time that it took to write each value. let mut record = BenchRecord::default(); @@ -54,57 +60,103 @@ impl StorageCmd { let block = BlockId::Number(client.usage_info().chain.best_number); let header = client.header(block)?.ok_or("Header not found")?; let original_root = *header.state_root(); - let trie = DbState::::new(storage.clone(), original_root); + let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); info!("Preparing keys from block {}", block); // Load all KV pairs and randomly shuffle them. let mut kvs = trie.pairs(); let (mut rng, _) = new_rng(None); kvs.shuffle(&mut rng); + info!("Writing {} keys", kvs.len()); + + let mut child_nodes = Vec::new(); // Generate all random values first; Make sure there are no collisions with existing // db entries, so we can rollback all additions without corrupting existing entries. - for (k, original_v) in kvs.iter_mut() { - 'retry: loop { - let mut new_v = vec![0; original_v.len()]; - // Create a random value to overwrite with. - // NOTE: We use a possibly higher entropy than the original value, - // could be improved but acts as an over-estimation which is fine for now. - rng.fill_bytes(&mut new_v[..]); - let new_kv = vec![(k.as_ref(), Some(new_v.as_ref()))]; - let (_, mut stx) = trie.storage_root(new_kv.iter().cloned(), self.state_version()); - for (mut k, (_, rc)) in stx.drain().into_iter() { - if rc > 0 { - db.sanitize_key(&mut k); - if db.get(state_col, &k).is_some() { - trace!("Benchmark-store key creation: Key collision detected, retry"); - continue 'retry + for (k, original_v) in kvs { + match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { + (true, Some(info)) => { + let child_keys = + client.child_storage_keys_iter(&block, info.clone(), None, None)?; + for ck in child_keys { + child_nodes.push((ck.clone(), info.clone())); + } + }, + _ => { + // regular key + let mut new_v = vec![0; original_v.len()]; + loop { + // Create a random value to overwrite with. + // NOTE: We use a possibly higher entropy than the original value, + // could be improved but acts as an over-estimation which is fine for now. + rng.fill_bytes(&mut new_v[..]); + if check_new_value::( + db.clone(), + &trie, + &k.to_vec(), + &new_v, + self.state_version(), + state_col, + None, + ) { + break } } - } - *original_v = new_v; - break + + // Write each value in one commit. + let (size, duration) = measure_write::( + db.clone(), + &trie, + k.to_vec(), + new_v.to_vec(), + self.state_version(), + state_col, + None, + )?; + record.append(size, duration)?; + }, } } - info!("Writing {} keys", kvs.len()); - // Write each value in one commit. - for (k, new_v) in kvs.iter() { - // Interesting part here: - let start = Instant::now(); - // Create a TX that will modify the Trie in the DB and - // calculate the root hash of the Trie after the modification. - let replace = vec![(k.as_ref(), Some(new_v.as_ref()))]; - let (_, stx) = trie.storage_root(replace.iter().cloned(), self.state_version()); - // Only the keep the insertions, since we do not want to benchmark pruning. - let tx = convert_tx::(db.clone(), stx.clone(), false, state_col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; - record.append(new_v.len(), start.elapsed())?; - - // Now undo the changes by removing what was added. 
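For context on `check_new_value` above: the benchmark rolls every write back afterwards, so a randomly drawn value must not map onto trie nodes that already exist in the database, or the rollback would delete live data. Reduced to a self-contained sketch (a `HashSet` stands in for the node database; the real code checks the derived trie nodes, not the raw value):

```rust
use rand::RngCore;
use std::collections::HashSet;

/// Draw random candidate values until one does not collide with an
/// existing entry, mirroring the retry loop around `check_new_value`.
fn pick_collision_free(
    existing: &HashSet<Vec<u8>>,
    len: usize,
    rng: &mut impl RngCore,
) -> Vec<u8> {
    let mut candidate = vec![0u8; len];
    loop {
        rng.fill_bytes(&mut candidate);
        if !existing.contains(&candidate) {
            return candidate
        }
        // Collision detected: retry with fresh randomness.
    }
}
```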
- let tx = convert_tx::(db.clone(), stx.clone(), true, state_col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + if self.params.include_child_trees { + child_nodes.shuffle(&mut rng); + info!("Writing {} child keys", child_nodes.len()); + + for (key, info) in child_nodes { + if let Some(original_v) = client + .child_storage(&block, &info.clone(), &key) + .expect("Checked above to exist") + { + let mut new_v = vec![0; original_v.0.len()]; + loop { + rng.fill_bytes(&mut new_v[..]); + if check_new_value::( + db.clone(), + &trie, + &key.0, + &new_v, + self.state_version(), + state_col, + Some(&info), + ) { + break + } + } + + let (size, duration) = measure_write::( + db.clone(), + &trie, + key.0, + new_v.to_vec(), + self.state_version(), + state_col, + Some(&info), + )?; + record.append(size, duration)?; + } + } } + Ok(record) } } @@ -134,3 +186,62 @@ fn convert_tx( } ret } + +/// Measures write benchmark +/// if `child_info` exist then it means this is a child tree key +fn measure_write( + db: Arc>, + trie: &DbState, + key: Vec, + new_v: Vec, + version: StateVersion, + col: ColumnId, + child_info: Option<&ChildInfo>, +) -> Result<(usize, Duration)> { + let start = Instant::now(); + // Create a TX that will modify the Trie in the DB and + // calculate the root hash of the Trie after the modification. + let replace = vec![(key.as_ref(), Some(new_v.as_ref()))]; + let stx = match child_info { + Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2, + None => trie.storage_root(replace.iter().cloned(), version).1, + }; + // Only the keep the insertions, since we do not want to benchmark pruning. + let tx = convert_tx::(db.clone(), stx.clone(), false, col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + let result = (new_v.len(), start.elapsed()); + + // Now undo the changes by removing what was added. + let tx = convert_tx::(db.clone(), stx.clone(), true, col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + Ok(result) +} + +/// Checks if a new value causes any collision in tree updates +/// returns true if there is no collision +/// if `child_info` exist then it means this is a child tree key +fn check_new_value( + db: Arc>, + trie: &DbState, + key: &Vec, + new_v: &Vec, + version: StateVersion, + col: ColumnId, + child_info: Option<&ChildInfo>, +) -> bool { + let new_kv = vec![(key.as_ref(), Some(new_v.as_ref()))]; + let mut stx = match child_info { + Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).2, + None => trie.storage_root(new_kv.iter().cloned(), version).1, + }; + for (mut k, (_, rc)) in stx.drain().into_iter() { + if rc > 0 { + db.sanitize_key(&mut k); + if db.get(col, &k).is_some() { + trace!("Benchmark-store key creation: Key collision detected, retry"); + return false + } + } + } + true +} diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index c5f3b1998fa97..5af7dd78a08e8 100644 --- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "Bag threshold generation script for pallet-bag-list and node-runtime." +description = "Bag threshold generation script for pallet-bag-list and kitchensink-runtime." 
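The `main.rs` hunk below is the other half of the rename: the generic threshold generator is now instantiated with the `kitchensink_runtime` crate. Written out as a sketch (the signature follows the call site in the diff; previously this read `generate_thresholds::<node_runtime::Runtime>(..)`):

```rust
// Sketch of the renamed instantiation in utils/frame/generate-bags/node-runtime.
fn run(
    n_bags: usize,
    output: &std::path::Path,
    total_issuance: u128,
    minimum_balance: u128,
) -> Result<(), std::io::Error> {
    generate_bags::generate_thresholds::<kitchensink_runtime::Runtime>(
        n_bags,
        output,
        total_issuance,
        minimum_balance,
    )
}
```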
readme = "README.md" [dependencies] -node-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" } generate-bags = { version = "4.0.0-dev", path = "../" } # third-party diff --git a/utils/frame/generate-bags/node-runtime/src/main.rs b/utils/frame/generate-bags/node-runtime/src/main.rs index 12bcf8d28cf2b..5ea1262d95d34 100644 --- a/utils/frame/generate-bags/node-runtime/src/main.rs +++ b/utils/frame/generate-bags/node-runtime/src/main.rs @@ -43,5 +43,10 @@ struct Opt { fn main() -> Result<(), std::io::Error> { let Opt { n_bags, output, total_issuance, minimum_balance } = Opt::parse(); - generate_thresholds::(n_bags, &output, total_issuance, minimum_balance) + generate_thresholds::( + n_bags, + &output, + total_issuance, + minimum_balance, + ) } diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index a77d090246421..3121157df68d8 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } env_logger = "0.9" -jsonrpsee = { version = "0.14.0", features = ["ws-client", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["ws-client", "macros"] } log = "0.4.17" serde = "1.0.136" serde_json = "1.0" diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 1d9dc5dcd11da..202560f18cf84 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -130,7 +130,7 @@ pub enum Transport { impl Transport { fn as_client(&self) -> Option<&WsClient> { match self { - Self::RemoteClient(client) => Some(&*client), + Self::RemoteClient(client) => Some(client), _ => None, } } diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 6c6bc5cf327b5..d45e502df276c 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -23,9 +23,9 @@ sp-io = { path = "../../../../primitives/io" } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } -trie-db = { version = "0.23.1" } +trie-db = "0.24.0" -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } # Substrate Dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index b6d403ff2fcfd..f9a57206ece4d 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -31,8 +31,11 @@ use sp_core::{ storage::{ChildInfo, ChildType, PrefixedStorageKey}, Hasher, }; -use sp_state_machine::Backend; -use sp_trie::{trie_types::TrieDB, KeySpacedDB, Trie}; +use sp_state_machine::backend::AsTrieBackend; +use sp_trie::{ + trie_types::{TrieDB, TrieDBBuilder}, + KeySpacedDB, Trie, +}; use trie_db::{ node::{NodePlan, ValuePlan}, TrieDBNodeIterator, @@ -41,9 +44,9 @@ use trie_db::{ fn count_migrate<'a, H: Hasher>( storage: &'a dyn trie_db::HashDBRef>, root: &'a H::Out, -) -> std::result::Result<(u64, TrieDB<'a, H>), 
String> { +) -> std::result::Result<(u64, TrieDB<'a, 'a, H>), String> { let mut nb = 0u64; - let trie = TrieDB::new(storage, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let trie = TrieDBBuilder::new(storage, root).build(); let iter_node = TrieDBNodeIterator::new(&trie).map_err(|e| format!("TrieDB node iterator error: {}", e))?; for node in iter_node { @@ -68,13 +71,9 @@ pub fn migration_status(backend: &B) -> std::result::Result<(u64, u64), St where H: Hasher, H::Out: codec::Codec, - B: Backend, + B: AsTrieBackend, { - let trie_backend = if let Some(backend) = backend.as_trie_backend() { - backend - } else { - return Err("No access to trie from backend.".to_string()) - }; + let trie_backend = backend.as_trie_backend(); let essence = trie_backend.essence(); let (nb_to_migrate, trie) = count_migrate(essence, essence.root())?; diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 7ea07534e1bdb..2104774bd2605 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpsee = { version = "0.14.0", features = ["jsonrpsee-types"] } +jsonrpsee = { version = "0.15.1", features = ["jsonrpsee-types"] } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } @@ -25,6 +25,6 @@ sp-storage = { version = "6.0.0", path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.1.1" -jsonrpsee = { version = "0.14.0", features = ["ws-client", "jsonrpsee-types"] } +jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.17.0" frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index f76944a0fec40..5d8984e8d399b 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde_json = "1" codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } futures = "0.3.21" log = "0.4.17" frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../../frame/system/rpc/runtime-api" } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index b82da0f1222a7..4b4f9bdb2809a 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -19,7 +19,7 @@ parity-scale-codec = "3.0.0" serde = "1.0.136" zstd = { version = "0.11.2", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } -jsonrpsee = { version = "0.14.0", default-features = false, features = ["ws-client"] } +jsonrpsee = { version = "0.15.1", default-features = false, features = ["ws-client"] } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 204acd879312f..aee3c34a1e0e8 100644 --- 
a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -139,7 +139,8 @@ where .state .builder::()? // make sure the state is being build with the parent hash, if it is online. - .overwrite_online_at(parent_hash.to_owned()); + .overwrite_online_at(parent_hash.to_owned()) + .state_version(shared.state_version); let builder = if command.overwrite_wasm_code { log::info!( diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index e2e6bd7244945..b6a11699a3d72 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -102,11 +102,13 @@ where // create an ext at the state of this block, whatever is the first subscription event. if maybe_state_ext.is_none() { - let builder = Builder::::new().mode(Mode::Online(OnlineConfig { - transport: command.uri.clone().into(), - at: Some(*header.parent_hash()), - ..Default::default() - })); + let builder = Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: command.uri.clone().into(), + at: Some(*header.parent_hash()), + ..Default::default() + })) + .state_version(shared.state_version); let new_ext = builder .inject_hashed_key_value(&[(code_key.clone(), code.clone())]) diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 50780f4513b2f..11ceb0a81cf37 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -128,7 +128,7 @@ where ); let ext = { - let builder = command.state.builder::()?; + let builder = command.state.builder::()?.state_version(shared.state_version); let builder = if command.overwrite_wasm_code { log::info!( diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 616498da02497..9bc6d32d4762a 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -52,7 +52,7 @@ where let execution = shared.execution; let ext = { - let builder = command.state.builder::()?; + let builder = command.state.builder::()?.state_version(shared.state_version); let (code_key, code) = extract_code(&config.chain_spec)?; builder.inject_hashed_key_value(&[(code_key, code)]).build().await? }; diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c09a33cf3f16a..9013da95f1722 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -293,7 +293,8 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, DeserializeOwned, }; -use sp_state_machine::{InMemoryProvingBackend, OverlayedChanges, StateMachine}; +use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; +use sp_version::StateVersion; use std::{fmt::Debug, path::PathBuf, str::FromStr}; mod commands; @@ -421,6 +422,10 @@ pub struct SharedParams { /// When enabled, the spec name check will not panic, and instead only show a warning. #[clap(long)] pub no_spec_name_check: bool, + + /// State version that is used by the chain. + #[clap(long, default_value = "1", parse(try_from_str = parse::state_version))] + pub state_version: StateVersion, } /// Our `try-runtime` command. 
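The new `--state-version` shared flag (default `1`) is parsed by the `parse::state_version` helper added at the bottom of this section; restated as a self-contained sketch, it accepts only `0` or `1` and maps them onto `sp_version::StateVersion`:

```rust
use sp_version::StateVersion;

/// Equivalent of `parse::state_version`: parse the flag as a `u8` and
/// convert it, rejecting anything other than `0` or `1`.
pub(crate) fn state_version(s: &str) -> Result<StateVersion, &'static str> {
    s.parse::<u8>()
        .map_err(|_| ())
        .and_then(StateVersion::try_from)
        .map_err(|_| "Invalid state version.")
}
```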
@@ -472,9 +477,10 @@ pub enum State { #[clap(short, long)] snapshot_path: Option, - /// The pallets to scrape. If empty, entire chain state will be scraped. + /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will + /// be scraped. #[clap(short, long, multiple_values = true)] - pallets: Vec, + pallet: Vec, /// Fetch the child-keys as well. /// @@ -498,7 +504,7 @@ impl State { Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), })), - State::Live { snapshot_path, pallets, uri, at, child_tree } => { + State::Live { snapshot_path, pallet, uri, at, child_tree } => { let at = match at { Some(at_str) => Some(hash_of::(at_str)?), None => None, @@ -507,7 +513,7 @@ impl State { .mode(Mode::Online(OnlineConfig { transport: uri.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - pallets: pallets.clone(), + pallets: pallet.clone(), scrape_children: true, at, })) @@ -745,9 +751,11 @@ pub(crate) fn state_machine_call_with_proof(Into::into)?; - let proof = proving_backend.extract_proof(); + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); let proof_size = proof.encoded_size(); let compact_proof = proof .clone() diff --git a/utils/frame/try-runtime/cli/src/parse.rs b/utils/frame/try-runtime/cli/src/parse.rs index 15a0251ebc34a..257a99566979f 100644 --- a/utils/frame/try-runtime/cli/src/parse.rs +++ b/utils/frame/try-runtime/cli/src/parse.rs @@ -17,6 +17,8 @@ //! Utils for parsing user input +use sp_version::StateVersion; + pub(crate) fn hash(block_hash: &str) -> Result { let (block_hash, offset) = if let Some(block_hash) = block_hash.strip_prefix("0x") { (block_hash, 2) @@ -42,3 +44,10 @@ pub(crate) fn url(s: &str) -> Result { Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") } } + +pub(crate) fn state_version(s: &str) -> Result { + s.parse::() + .map_err(|_| ()) + .and_then(StateVersion::try_from) + .map_err(|_| "Invalid state version.") +} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 8f887e45ec176..ac0ba5dbdb85a 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] ansi_term = "0.12.1" build-helper = "0.1.1" cargo_metadata = "0.14.2" -strum = { version = "0.23.0", features = ["derive"] } +strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.2" diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 919290655368b..fc86a06170a50 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -92,7 +92,7 @@ //! //! Each project can be skipped individually by using the environment variable //! `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the -//! cargo project, e.g. `node-runtime` will be `NODE_RUNTIME`. +//! cargo project, e.g. `kitchensink-runtime` will be `NODE_RUNTIME`. //! //! ## Prerequisites: //!
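One consequence of the crate rename worth spelling out: the per-project skip variable documented above is derived mechanically from the cargo project name, so for `kitchensink-runtime` it becomes `SKIP_KITCHENSINK_RUNTIME_WASM_BUILD`. A sketch of that derivation (the helper is illustrative, not wasm-builder API):

```rust
/// Illustrative derivation of `SKIP_PROJECT_NAME_WASM_BUILD` from a crate name.
fn skip_wasm_build_var(crate_name: &str) -> String {
    format!("SKIP_{}_WASM_BUILD", crate_name.replace('-', "_").to_uppercase())
}

fn main() {
    assert_eq!(
        skip_wasm_build_var("kitchensink-runtime"),
        "SKIP_KITCHENSINK_RUNTIME_WASM_BUILD"
    );
}
```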
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some( <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index a2cda6a0777b8..7a5acf43b92b0 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -23,7 +23,7 @@ use syn::{ parse::{Parse, ParseStream}, punctuated::Punctuated, spanned::Spanned, - token, Error, Ident, Path, Result, Token, + token, Attribute, Error, Ident, Path, Result, Token, }; mod keyword { @@ -73,7 +73,14 @@ pub struct ExplicitRuntimeDeclaration { impl Parse for RuntimeDeclaration { fn parse(input: ParseStream) -> Result { input.parse::()?; - input.parse::()?; + + // Support either `enum` or `struct`. + if input.peek(Token![struct]) { + input.parse::()?; + } else { + input.parse::()?; + } + let name = input.parse::()?; let where_section = input.parse()?; let pallets = @@ -178,6 +185,8 @@ impl Parse for WhereDefinition { pub struct PalletDeclaration { /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, + /// Optional attributes tagged right above a pallet declaration. + pub attrs: Vec, /// Optional fixed index, e.g. `MyPallet ... = 3,`. pub index: Option, /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. @@ -205,6 +214,8 @@ pub enum SpecifiedParts { impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result { + let attrs = input.call(Attribute::parse_outer)?; + let name = input.parse()?; let _: Token![:] = input.parse()?; let path = input.parse()?; @@ -272,7 +283,7 @@ impl Parse for PalletDeclaration { None }; - Ok(Self { name, path, instance, pallet_parts, specified_parts, index }) + Ok(Self { attrs, name, path, instance, pallet_parts, specified_parts, index }) } } @@ -528,6 +539,8 @@ pub struct Pallet { pub instance: Option, /// The pallet parts to use for the pallet. pub pallet_parts: Vec, + /// Expressions specified inside of a #[cfg] attribute. + pub cfg_pattern: Vec, } impl Pallet { @@ -640,11 +653,32 @@ fn convert_pallets(pallets: Vec) -> syn::Result (), } + let cfg_pattern = pallet + .attrs + .iter() + .map(|attr| { + if attr.path.segments.len() != 1 || attr.path.segments[0].ident != "cfg" { + let msg = "Unsupported attribute, only #[cfg] is supported on pallet \ + declarations in `construct_runtime`"; + return Err(syn::Error::new(attr.span(), msg)) + } + + attr.parse_args_with(|input: syn::parse::ParseStream| { + // Required, otherwise the parse stream doesn't advance and will result in + // an error. 
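The `cfg_pattern` expressions collected here at parse time are re-emitted as literal attributes by the same small fold in every expand module (inherent, metadata, origin, unsigned, and `decl_pallet_runtime_setup`). Factored out, that fold would read roughly as follows (a sketch; the helper name is invented, the diff inlines it at each site):

```rust
use proc_macro2::TokenStream;
use std::str::FromStr;

/// Turn parsed cfg expressions back into `#[cfg(...)]` attribute tokens.
fn cfg_attrs(cfg_pattern: &[cfg_expr::Expression]) -> TokenStream {
    cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| {
        let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original()))
            .expect("was successfully parsed before; qed");
        quote::quote! { #acc #attr }
    })
}
```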
+ let input = input.parse::()?; + cfg_expr::Expression::parse(&input.to_string()) + .map_err(|e| syn::Error::new(attr.span(), e.to_string())) + }) + }) + .collect::>>()?; + Ok(Pallet { name: pallet.name, index: final_index, path: pallet.path, instance: pallet.instance, + cfg_pattern, pallet_parts, }) }) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 418fad56b7674..00204b7a4d906 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -429,7 +429,6 @@ pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { /// } /// ``` #[proc_macro_attribute] -#[deprecated(note = "This is now the default behaviour for all extrinsics.")] pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) } diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index a5038ec399e2b..a9468451ad1d4 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -140,6 +140,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; + // Wrap all calls inside of storage layers + if let Some(syn::Item::Impl(item_impl)) = def + .call + .as_ref() + .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) + { + item_impl.items.iter_mut().for_each(|i| { + if let syn::ImplItem::Method(method) = i { + let block = &method.block; + method.block = syn::parse_quote! {{ + // We execute all dispatchable in a new storage layer, allowing them + // to return an error at any point, and undoing any storage changes. + #frame_support::storage::with_storage_layer(|| #block) + }}; + } + }); + } + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -267,12 +285,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); - // We execute all dispatchable in at least one storage layer, allowing them - // to return an error at any point, and undoing any storage changes. - #frame_support::storage::in_storage_layer(|| { - <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) - .map(Into::into).map_err(Into::into) - }) + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into) }, )* Self::__Ignore(_, _) => { diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 124e8b312ce39..5a8487b09de5c 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -111,7 +111,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( #[doc = r" - Custom [dispatch errors](https://docs.substrate.io/v3/runtime/events-and-errors) + Custom [dispatch errors](https://docs.substrate.io/main-docs/build/events-errors/) of this pallet. 
"] )); diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index acd60ab959c61..791f930302207 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -98,7 +98,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&event_item.attrs).is_empty() { event_item.attrs.push(syn::parse_quote!( #[doc = r" - The [event](https://docs.substrate.io/v3/runtime/events-and-errors) emitted + The [event](https://docs.substrate.io/main-docs/build/events-errors/) emitted by this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 83bef7a97af1f..c33d2386700b2 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -74,7 +74,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { def.item.attrs.push(syn::parse_quote!( #[doc = r" The module that hosts all the - [FRAME](https://docs.substrate.io/v3/runtime/frame) + [FRAME](https://docs.substrate.io/main-docs/build/events-errors/) types needed to add this pallet to a runtime. "] diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 52586a70a521a..f0fb6bacedffb 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,7 +62,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://docs.substrate.io/v3/runtime/frame#pallets) implementing + The [pallet](https://docs.substrate.io/reference/frame-pallets/#pallets) implementing the on-chain logic. "] )); @@ -240,9 +240,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn accumulate( - acc: &mut #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> - ) { + fn infos() -> #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { use #frame_support::traits::PalletInfoAccess; let item = #frame_support::traits::PalletInfoData { index: Self::index(), @@ -250,7 +248,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + #frame_support::sp_std::vec![item] } } diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 657968e17a80c..181f35b545496 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -19,7 +19,9 @@ use crate::pallet::{ parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, Def, }; -use std::collections::HashMap; +use quote::ToTokens; +use std::{collections::HashMap, ops::IndexMut}; +use syn::spanned::Spanned; /// Generate the prefix_ident related to the storage. /// prefix_ident is used for the prefix struct to be given to storage as first generic param. @@ -84,12 +86,28 @@ fn check_prefix_duplicates( Ok(()) } +pub struct ResultOnEmptyStructMetadata { + /// The Rust ident that is going to be used as the name of the OnEmpty struct. 
+ pub name: syn::Ident, + /// The path to the error type being returned by the ResultQuery. + pub error_path: syn::Path, + /// The visibility of the OnEmpty struct. + pub visibility: syn::Visibility, + /// The type of the storage item. + pub value_ty: syn::Type, + /// The name of the pallet error enum variant that is going to be returned. + pub variant_name: syn::Ident, + /// The span used to report compilation errors about the OnEmpty struct. + pub span: proc_macro2::Span, +} + /// /// * if generics are unnamed: replace the first generic `_` by the generated prefix structure /// * if generics are named: reorder the generic, remove their name, and add the missing ones. /// * Add `#[allow(type_alias_bounds)]` -pub fn process_generics(def: &mut Def) -> syn::Result<()> { +pub fn process_generics(def: &mut Def) -> syn::Result> { let frame_support = &def.frame_support; + let mut on_empty_struct_metadata = Vec::new(); for storage_def in def.storages.iter_mut() { let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; @@ -120,27 +138,72 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { let default_query_kind: syn::Type = syn::parse_quote!(#frame_support::storage::types::OptionQuery); - let default_on_empty: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let mut default_on_empty = |value_ty: syn::Type| -> syn::Type { + if let Some(QueryKind::ResultQuery(error_path, variant_name)) = + storage_def.query_kind.as_ref() + { + let on_empty_ident = + quote::format_ident!("__Frame_Internal_Get{}Result", storage_def.ident); + on_empty_struct_metadata.push(ResultOnEmptyStructMetadata { + name: on_empty_ident.clone(), + visibility: storage_def.vis.clone(), + value_ty, + error_path: error_path.clone(), + variant_name: variant_name.clone(), + span: storage_def.attr_span, + }); + return syn::parse_quote!(#on_empty_ident) + } + syn::parse_quote!(#frame_support::traits::GetDefault) + }; let default_max_values: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let set_result_query_type_parameter = |query_type: &mut syn::Type| -> syn::Result<()> { + if let Some(QueryKind::ResultQuery(error_path, _)) = storage_def.query_kind.as_ref() { + if let syn::Type::Path(syn::TypePath { path: syn::Path { segments, .. }, .. }) = + query_type + { + if let Some(seg) = segments.last_mut() { + if let syn::PathArguments::AngleBracketed( + syn::AngleBracketedGenericArguments { args, .. 
}, + ) = &mut seg.arguments + { + args.clear(); + args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))); + } + } + } else { + let msg = format!( + "Invalid pallet::storage, unexpected type for query, expected ResultQuery \ + with 1 type parameter, found `{}`", + query_type.to_token_stream().to_string() + ); + return Err(syn::Error::new(query_type.span(), msg)) + } + } + Ok(()) + }; + if let Some(named_generics) = storage_def.named_generics.clone() { args.args.clear(); args.args.push(syn::parse_quote!( #prefix_ident<#type_use_gen> )); match named_generics { StorageGenerics::Value { value, query_kind, on_empty } => { - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); }, StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -155,10 +218,11 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -177,20 +241,22 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(key1)); args.args.push(syn::GenericArgument::Type(hasher2)); args.args.push(syn::GenericArgument::Type(key2)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut 
query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); }, StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(keygen)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -198,10 +264,40 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { } } else { args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); + + let (value_idx, query_idx, on_empty_idx) = match storage_def.metadata { + Metadata::Value { .. } => (1, 2, 3), + Metadata::NMap { .. } => (2, 3, 4), + Metadata::Map { .. } | Metadata::CountedMap { .. } => (3, 4, 5), + Metadata::DoubleMap { .. } => (5, 6, 7), + }; + + if query_idx < args.args.len() { + if let syn::GenericArgument::Type(query_kind) = args.args.index_mut(query_idx) { + set_result_query_type_parameter(query_kind)?; + } + } else if let Some(QueryKind::ResultQuery(error_path, _)) = + storage_def.query_kind.as_ref() + { + args.args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))) + } + + // Here, we only need to check if OnEmpty is *not* specified, and if so, then we have to + // generate a default OnEmpty struct for it. 
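Before the `OnEmpty` check below, it helps to see what all this machinery supports from the user's side: a storage item can now pick `ResultQuery`, naming the error variant to return while the item is empty. A minimal sketch of such a declaration (pallet, value type and variant name are illustrative):

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::error]
    pub enum Error<T> {
        /// Returned by `Counter::get()` while nothing is stored.
        NonExistentValue,
    }

    // With `ResultQuery`, `Counter::get()` returns `Result<u32, Error<T>>`
    // instead of `Option<u32>`, yielding `Err(NonExistentValue)` when empty.
    #[pallet::storage]
    pub type Counter<T> =
        StorageValue<_, u32, ResultQuery<Error<T>::NonExistentValue>>;
}
```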
+ if on_empty_idx >= args.args.len() && + matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) + { + let value_ty = match args.args[value_idx].clone() { + syn::GenericArgument::Type(ty) => ty, + _ => unreachable!(), + }; + let on_empty = default_on_empty(value_ty); + args.args.push(syn::GenericArgument::Type(on_empty)); + } } } - Ok(()) + Ok(on_empty_struct_metadata) } /// @@ -212,9 +308,10 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { /// * Add `#[allow(type_alias_bounds)]` on storages type alias /// * generate metadatas pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { - if let Err(e) = process_generics(def) { - return e.into_compile_error() - } + let on_empty_struct_metadata = match process_generics(def) { + Ok(idents) => idents, + Err(e) => return e.into_compile_error(), + }; // Check for duplicate prefixes let mut prefix_set = HashMap::new(); @@ -277,6 +374,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -296,6 +397,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -317,6 +422,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -338,6 +447,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -361,6 +474,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -459,6 +576,61 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { ) }); + let on_empty_structs = on_empty_struct_metadata.into_iter().map(|metadata| { + use crate::pallet::parse::GenericKind; + use syn::{GenericArgument, Path, PathArguments, PathSegment, Type, TypePath}; + + let ResultOnEmptyStructMetadata { + name, + visibility, + value_ty, + error_path, + variant_name, + span, + } = metadata; + + let generic_kind = match error_path.segments.last() { + Some(PathSegment { arguments: PathArguments::AngleBracketed(args), .. 
}) => { + let (has_config, has_instance) = + args.args.iter().fold((false, false), |(has_config, has_instance), arg| { + match arg { + GenericArgument::Type(Type::Path(TypePath { + path: Path { segments, .. }, + .. + })) => { + let maybe_config = + segments.first().map_or(false, |seg| seg.ident == "T"); + let maybe_instance = + segments.first().map_or(false, |seg| seg.ident == "I"); + + (has_config || maybe_config, has_instance || maybe_instance) + }, + _ => (has_config, has_instance), + } + }); + GenericKind::from_gens(has_config, has_instance).unwrap_or(GenericKind::None) + }, + _ => GenericKind::None, + }; + let type_impl_gen = generic_kind.type_impl_gen(proc_macro2::Span::call_site()); + let config_where_clause = &def.config.where_clause; + + quote::quote_spanned!(span => + #[doc(hidden)] + #[allow(non_camel_case_types)] + #visibility struct #name; + + impl<#type_impl_gen> #frame_support::traits::Get> + for #name + #config_where_clause + { + fn get() -> Result<#value_ty, #error_path> { + Err(<#error_path>::#variant_name) + } + } + ) + }); + let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); let completed_where_clause = super::merge_where_clauses(&where_clauses); @@ -489,5 +661,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #( #getters )* #( #prefix_structs )* + #( #on_empty_structs )* ) } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 75c85474dcfe7..336e08c3d39b7 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -48,8 +48,8 @@ pub struct CallDef { pub docs: Vec, } -#[derive(Clone)] /// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` +#[derive(Clone)] pub struct CallVariantDef { /// Function name. pub name: syn::Ident, @@ -143,18 +143,18 @@ impl CallDef { index: usize, item: &mut syn::Item, ) -> syn::Result { - let item = if let syn::Item::Impl(item) = item { + let item_impl = if let syn::Item::Impl(item) = item { item } else { return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let instances = vec![ - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - helper::check_pallet_struct_usage(&item.self_ty)?, + helper::check_impl_gen(&item_impl.generics, item_impl.impl_token.span())?, + helper::check_pallet_struct_usage(&item_impl.self_ty)?, ]; - if let Some((_, _, for_)) = item.trait_ { + if let Some((_, _, for_)) = item_impl.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. 
}`"; return Err(syn::Error::new(for_.span(), msg)) @@ -163,8 +163,8 @@ impl CallDef { let mut methods = vec![]; let mut indices = HashMap::new(); let mut last_index: Option = None; - for impl_item in &mut item.items { - if let syn::ImplItem::Method(method) = impl_item { + for item in &mut item_impl.items { + if let syn::ImplItem::Method(method) = item { if !matches!(method.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::call, dispatchable function must be public: \ `pub fn`"; @@ -188,7 +188,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&*arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty)?; }, } @@ -290,7 +290,7 @@ impl CallDef { }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)) } } @@ -299,8 +299,8 @@ impl CallDef { attr_span, instances, methods, - where_clause: item.generics.where_clause.clone(), - docs: get_doc_literals(&item.attrs), + where_clause: item_impl.generics.where_clause.clone(), + docs: get_doc_literals(&item_impl.attrs), }) } } diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 1f1bb5b2f26ad..f0e1353774910 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(OptionQuery); + syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); } @@ -129,6 +130,7 @@ pub enum Metadata { pub enum QueryKind { OptionQuery, + ResultQuery(syn::Path, syn::Ident), ValueQuery, } @@ -153,7 +155,7 @@ pub struct StorageDef { /// Optional expression that evaluates to a type that can be used as StoragePrefix instead of /// ident. pub rename_as: Option, - /// Whereas the querytype of the storage is OptionQuery or ValueQuery. + /// Whereas the querytype of the storage is OptionQuery, ResultQuery or ValueQuery. /// Note that this is best effort as it can't be determined when QueryKind is generic, and /// result can be false if user do some unexpected type alias. pub query_kind: Option, @@ -695,21 +697,105 @@ impl StorageDef { let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; let query_kind = query_kind - .map(|query_kind| match query_kind { - syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") => - Some(QueryKind::OptionQuery), - syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => - Some(QueryKind::ValueQuery), - _ => None, + .map(|query_kind| { + use syn::{ + AngleBracketedGenericArguments, GenericArgument, Path, PathArguments, Type, + TypePath, + }; + + let result_query = match query_kind { + Type::Path(path) + if path + .path + .segments + .last() + .map_or(false, |s| s.ident == "OptionQuery") => + return Ok(Some(QueryKind::OptionQuery)), + Type::Path(TypePath { path: Path { segments, .. }, .. 
}) + if segments.last().map_or(false, |s| s.ident == "ResultQuery") => + segments + .last() + .expect("segments is checked to have the last value; qed") + .clone(), + Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => + return Ok(Some(QueryKind::ValueQuery)), + _ => return Ok(None), + }; + + let error_type = match result_query.arguments { + PathArguments::AngleBracketed(AngleBracketedGenericArguments { + args, .. + }) => { + if args.len() != 1 { + let msg = format!( + "Invalid pallet::storage, unexpected number of generic arguments \ + for ResultQuery, expected 1 type argument, found {}", + args.len(), + ); + return Err(syn::Error::new(args.span(), msg)) + } + + args[0].clone() + }, + args => { + let msg = format!( + "Invalid pallet::storage, unexpected generic args for ResultQuery, \ + expected angle-bracketed arguments, found `{}`", + args.to_token_stream().to_string() + ); + return Err(syn::Error::new(args.span(), msg)) + }, + }; + + match error_type { + GenericArgument::Type(Type::Path(TypePath { + path: Path { segments: err_variant, leading_colon }, + .. + })) => { + if err_variant.len() < 2 { + let msg = format!( + "Invalid pallet::storage, unexpected number of path segments for \ + the generics in ResultQuery, expected a path with at least 2 \ + segments, found {}", + err_variant.len(), + ); + return Err(syn::Error::new(err_variant.span(), msg)) + } + let mut error = err_variant.clone(); + let err_variant = error + .pop() + .expect("Checked to have at least 2; qed") + .into_value() + .ident; + + // Necessary here to eliminate the last double colon + let last = + error.pop().expect("Checked to have at least 2; qed").into_value(); + error.push_value(last); + + Ok(Some(QueryKind::ResultQuery( + syn::Path { leading_colon, segments: error }, + err_variant, + ))) + }, + gen_arg => { + let msg = format!( + "Invalid pallet::storage, unexpected generic argument kind, expected a \ + type path to a `PalletError` enum variant, found `{}`", + gen_arg.to_token_stream().to_string(), + ); + Err(syn::Error::new(gen_arg.span(), msg)) + }, + } }) - .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. + .transpose()? + .unwrap_or(Some(QueryKind::OptionQuery)); if let (None, Some(getter)) = (query_kind.as_ref(), getter.as_ref()) { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ - identifiable. QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ - identifiable."; + identifiable. 
QueryKind must be `OptionQuery`, `ResultQuery`, `ValueQuery`, or left as the \ default to be identifiable."; return Err(syn::Error::new(getter.span(), msg)) } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index ebb9a31cfe470..f9edce3e3d8d1 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -29,8 +29,7 @@ pub use crate::{ result, }, traits::{ - CallMetadata, DispatchableWithStorageLayer, GetCallMetadata, GetCallName, - GetStorageVersion, UnfilteredDispatchable, + CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, }, weights::{ ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, @@ -177,18 +176,18 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// ``` /// # #[macro_use] /// # extern crate frame_support; -/// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; +/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}}; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { -/// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; +/// ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100_000)))?; /// if do_expensive_calc { /// // do the expensive calculation /// // ... /// // return None to indicate that we are using all weight (the default) -/// return Ok(None.into()); +/// return Ok(None::<Weight>.into()); /// } /// // expensive calculation not executed: use only a portion of the weight /// Ok(Some(100_000).into()) @@ -307,7 +306,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// /// The following are reserved function signatures: /// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/v3/runtime/events-and-errors). +/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/main-docs/build/events-errors/). /// The default behavior is to call `deposit_event` from the [System /// module](../frame_system/index.html). However, you can write your own implementation for events /// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your @@ -1473,9 +1472,9 @@ macro_rules! decl_module { $ignore:ident $mod_type:ident<$trait_instance:ident $(, $instance:ident)?> $fn_name:ident $origin:ident $system:ident [ $( $param_name:ident),* ] ) => { - // We execute all dispatchable in at least one storage layer, allowing them + // We execute all dispatchables in a new storage layer, allowing them // to return an error at any point, and undoing any storage changes. - $crate::storage::in_storage_layer(|| { + $crate::storage::with_storage_layer(|| { <$mod_type<$trait_instance $(, $instance)?>>::$fn_name( $origin $(, $param_name )* ).map(Into::into).map_err(Into::into) }) }; @@ -1615,7 +1614,7 @@ macro_rules! decl_module { pallet_name, ); - 0 + $crate::dispatch::Weight::new() } #[cfg(feature = "try-runtime")] @@ -1788,9 +1787,11 @@ macro_rules! 
decl_module { $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - { $( $impl )* } - Ok(()) + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + { $( $impl )* } + Ok(()) + }) } }; @@ -1806,8 +1807,10 @@ macro_rules! decl_module { ) => { $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - $( $impl )* + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + $( $impl )* + }) } }; @@ -2204,7 +2207,7 @@ macro_rules! decl_module { for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { fn count() -> usize { 1 } - fn accumulate(acc: &mut $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData>) { + fn infos() -> $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData> { use $crate::traits::PalletInfoAccess; let item = $crate::traits::PalletInfoData { index: Self::index(), @@ -2212,7 +2215,7 @@ macro_rules! decl_module { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + vec![item] } } @@ -2646,13 +2649,13 @@ mod tests { #[weight = (5, DispatchClass::Operational)] fn operational(_origin) { unreachable!() } - fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } + fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } Weight::from_ref_time(7) } fn on_idle(n: T::BlockNumber, remaining_weight: Weight,) -> Weight { - if n.into() == 42 || remaining_weight == 42 { panic!("on_idle") } - 7 + if n.into() == 42 || remaining_weight == Weight::from_ref_time(42) { panic!("on_idle") } + Weight::from_ref_time(7) } fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } - fn on_runtime_upgrade() -> Weight { 10 } + fn on_runtime_upgrade() -> Weight { Weight::from_ref_time(10) } fn offchain_worker() {} /// Some doc fn integrity_test() { panic!("integrity_test") } @@ -2828,24 +2831,30 @@ mod tests { #[test] fn on_initialize_should_work_2() { - assert_eq!( as OnInitialize>::on_initialize(10), 7); + assert_eq!( + as OnInitialize>::on_initialize(10), + Weight::from_ref_time(7) + ); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_1() { - as OnIdle>::on_idle(42, 9); + as OnIdle>::on_idle(42, Weight::from_ref_time(9)); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_2() { - as OnIdle>::on_idle(9, 42); + as OnIdle>::on_idle(9, Weight::from_ref_time(42)); } #[test] fn on_idle_should_work_3() { - assert_eq!( as OnIdle>::on_idle(10, 11), 7); + assert_eq!( + as OnIdle>::on_idle(10, Weight::from_ref_time(11)), + Weight::from_ref_time(7) + ); } #[test] @@ -2857,7 +2866,10 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { sp_io::TestExternalities::default().execute_with(|| { - assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10) + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::from_ref_time(10) + ) }); } @@ -2866,12 +2878,20 @@ mod tests { // operational. 
assert_eq!( Call::::operational {}.get_dispatch_info(), - DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, ); // custom basic assert_eq!( Call::::aux_3 {}.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, ); } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index f2cd7d1e165ec..a81a266fb9a3d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -16,6 +16,16 @@ // limitations under the License. //! Support code for the runtime. +//! +//! ## Note on Tuple Traits +//! +//! Many of the traits defined in [`traits`] have auto-implementations on tuples as well. Usually, +//! the tuple is a function of number of pallets in the runtime. By default, the traits are +//! implemented for tuples of up to 64 items. +// +// If you have more pallets in your runtime, or for any other reason need more, enable the +// `tuples-96` or `tuples-128` compilation feature. Note that these features *will increase* the +// compile time of this crate. #![cfg_attr(not(feature = "std"), no_std)] @@ -271,79 +281,85 @@ pub use frame_support_procedural::storage_alias; macro_rules! parameter_types { ( $( #[ $attr:meta ] )* - $vis:vis const $name:ident: $type:ty = $value:expr; + $vis:vis const $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL_CONST $name , $type , $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL_CONST $name , $type , $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis $name:ident: $type:ty = $value:expr; + $vis:vis $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL $name, $type, $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis storage $name:ident: $type:ty = $value:expr; + $vis:vis storage $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL_STORAGE $name, $type, $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL_STORAGE $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); () => (); - (IMPL_CONST $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL_CONST $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the value of this parameter type. 
pub const fn get() -> $type { $value } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the value of this parameter type. pub fn get() -> $type { $value } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL_STORAGE $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL_STORAGE $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the key for this parameter type. #[allow(unused)] pub fn key() -> [u8; 16] { @@ -369,13 +385,13 @@ macro_rules! parameter_types { } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() @@ -679,7 +695,7 @@ macro_rules! assert_noop { ) => { let h = $crate::storage_root($crate::StateVersion::V1); $crate::assert_err!($x, $y); - assert_eq!(h, $crate::storage_root($crate::StateVersion::V1)); + assert_eq!(h, $crate::storage_root($crate::StateVersion::V1), "storage has been mutated"); }; } @@ -826,6 +842,7 @@ pub mod tests { pub struct Module for enum Call where origin: T::Origin, system=self {} } } + use self::module::Module; decl_storage! { @@ -851,6 +868,7 @@ pub mod tests { } struct Test; + impl Config for Test { type BlockNumber = u32; type Origin = u32; @@ -867,6 +885,7 @@ pub mod tests { trait Sorted { fn sorted(self) -> Self; } + impl Sorted for Vec { fn sorted(mut self) -> Self { self.sort(); @@ -1347,15 +1366,15 @@ pub mod pallet_prelude { storage::{ bounded_vec::BoundedVec, types::{ - CountedStorageMap, Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, - StorageNMap, StorageValue, ValueQuery, + CountedStorageMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, + StorageMap, StorageNMap, StorageValue, ValueQuery, }, }, traits::{ ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, - weights::{DispatchClass, Pays, Weight}, + weights::{DispatchClass, Pays, RefTimeWeight, Weight}, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; @@ -1822,6 +1841,23 @@ pub mod pallet_prelude { /// All the `cfg` attributes are automatically copied to the items generated for the storage, /// i.e. 
the getter, storage prefix, and the metadata element etc. /// +/// Any type placed as the `QueryKind` parameter must implement +/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this +/// trait by default: +/// 1. [`frame_support::storage::types::OptionQuery`], the default `QueryKind` used when this +/// type parameter is omitted. Specifying this as the `QueryKind` would cause storage map +/// APIs that return a `QueryKind` to instead return an `Option`, returning `Some` when a +/// value does exist under a specified storage key, and `None` otherwise. +/// 2. [`frame_support::storage::types::ValueQuery`] causes storage map APIs that return a +/// `QueryKind` to instead return the value type. In cases where a value does not exist +/// under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is used +/// to return an appropriate value. +/// 3. [`frame_support::storage::types::ResultQuery`] causes storage map APIs that return a +/// `QueryKind` to instead return a `Result`, with `T` being the value type and `E` +/// being the pallet error type specified by the `#[pallet::error]` attribute. In cases +/// where a value does not exist under a specified storage key, an `Err` with the specified +/// pallet error variant is returned. +/// /// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some /// type alias then the generation of the getter might fail. In this case the getter can be /// implemented manually. diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 05833e0515c07..b559d3aca615f 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -19,6 +19,7 @@ use crate::{ traits::{GetStorageVersion, PalletInfoAccess}, weights::{RuntimeDbWeight, Weight}, }; +use impl_trait_for_tuples::impl_for_tuples; /// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. pub trait PalletVersionToStorageVersionHelper { @@ -42,10 +43,12 @@ impl PalletVersionToStorageVersionHelpe } } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 2e090d30119aa..9ba4cf052e82a 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -91,10 +92,10 @@ impl Key2: MaxEncodedLen, { fn get() -> u32 { - let z = Hasher1::max_len::() + - Hasher2::max_len::() + - Prefix::pallet_prefix().len() + - Prefix::STORAGE_PREFIX.len(); + // The `max_len` of both key hashes plus the pallet prefix and storage prefix (which both + // are hashed with `Twox128`). 
+ let z = + Hasher1::max_len::() + Hasher2::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -755,6 +756,16 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat and Twox64Concat. + type A = StorageDoubleMap; + let size = 16 * 2 // Two Twox128 + + 16 + 8 // Blake2_128Concat = hash + key + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index f4ac83c22663b..0f89e2378a55d 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -61,8 +62,9 @@ where Key: FullCodec + MaxEncodedLen, { fn get() -> u32 { - let z = - Hasher::max_len::() + Prefix::pallet_prefix().len() + Prefix::STORAGE_PREFIX.len(); + // The `max_len` of the key hash plus the pallet prefix and storage prefix (which both are + // hashed with `Twox128`). + let z = Hasher::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -501,6 +503,27 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat. + type A = StorageMap; + let size = 16 * 2 // Two Twox128 + + 16 + 4; // Blake2_128Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + + // Works with Blake2_256. + type B = StorageMap; + let size = 16 * 2 // Two Twox128 + + 32; // Blake2_256 + assert_eq!(KeyLenOf::::get(), size); + + // Works with Twox64Concat. + type C = StorageMap; + let size = 16 * 2 // Two Twox128 + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = StorageMap; diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index 0706e9fb377e2..f87da5de12274 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -42,9 +42,11 @@ pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. /// /// It is implemented by: -/// * `OptionQuery` which convert an optional value to an optional value, user when querying storage -/// will get an optional value. -/// * `ValueQuery` which convert an optional value to a value, user when querying storage will get a +/// * `OptionQuery` which converts an optional value to an optional value, used when querying +/// storage returns an optional value. +/// * `ResultQuery` which converts an optional value to a result value, used when querying storage +/// returns a result value. +/// * `ValueQuery` which converts an optional value to a value, used when querying storage returns a /// value. pub trait QueryKindTrait { /// Metadata for the storage kind. 
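Reviewer's aside, not part of the patch: a minimal sketch of what the three query kinds look like at a storage declaration site. The item names and the `NonExistentValue` error variant are illustrative only; the `ResultQuery<Error<T>::NonExistentValue>` path shape is the one the new `parse/storage.rs` logic above splits into a pallet error path plus a variant ident.

    // Hypothetical pallet items, for illustration only.
    #[pallet::error]
    pub enum Error<T> {
        /// Returned when `Res` is queried while no value is stored.
        NonExistentValue,
    }

    #[pallet::storage]
    type Opt<T> = StorageValue<_, u32>; // OptionQuery is the default: get() -> Option<u32>

    #[pallet::storage]
    type Val<T> = StorageValue<_, u32, ValueQuery>; // get() -> u32, the `OnEmpty` value when unset

    #[pallet::storage]
    type Res<T> = StorageValue<_, u32, ResultQuery<Error<T>::NonExistentValue>>;
    // get() -> Result<u32, Error<T>>, Err(Error::<T>::NonExistentValue) when unset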
@@ -85,6 +87,30 @@ where } } +/// Implement QueryKindTrait with query being `Result` +pub struct ResultQuery(sp_std::marker::PhantomData); +impl QueryKindTrait for ResultQuery +where + Value: FullCodec + 'static, + Error: FullCodec + 'static, + OnEmpty: crate::traits::Get>, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + + type Query = Result; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + match v { + Some(v) => Ok(v), + None => OnEmpty::get(), + } + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + v.ok() + } +} + /// Implement QueryKindTrait with query being `Value` pub struct ValueQuery; impl QueryKindTrait for ValueQuery diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index fc6d8ae79c57d..089230bc9bfdf 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -155,7 +155,7 @@ pub fn clear_prefix( /// Get a Vec of bytes from storage. pub fn get_raw(key: &[u8]) -> Option> { - sp_io::storage::get(key) + sp_io::storage::get(key).map(|value| value.to_vec()) } /// Put a raw byte slice into storage. diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 22839d28c32c6..0ae502e1272bd 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -50,7 +50,7 @@ mod error; pub use error::PalletError; mod filter; -pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter, IntegrityTest}; +pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter}; mod misc; pub use misc::{ @@ -82,7 +82,8 @@ mod hooks; #[cfg(feature = "std")] pub use hooks::GenesisBuild; pub use hooks::{ - Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, + Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnTimestampSet, }; #[cfg(feature = "try-runtime")] pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index 3d63dd3164ad7..159fa520481c4 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -236,23 +236,6 @@ pub trait UnfilteredDispatchable { fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; } -/// Type that can be dispatched with an additional storage layer which is used to execute the call. -pub trait DispatchableWithStorageLayer { - /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). - type Origin; - - /// Same as `dispatch` from the [`frame_support::dispatch::Dispatchable`] trait, but - /// specifically spawns a new storage layer to execute the call inside of. - fn dispatch_with_storage_layer(self, origin: Self::Origin) -> DispatchResultWithPostInfo; - - /// Same as `dispatch_bypass_filter` from the [`UnfilteredDispatchable`] trait, but specifically - /// spawns a new storage layer to execute the call inside of. - fn dispatch_bypass_filter_with_storage_layer( - self, - origin: Self::Origin, - ) -> DispatchResultWithPostInfo; -} - /// The trait implemented by the overarching enumeration of the different pallets' origins. /// Unlike `OriginTrait` impls, this does not include any kind of dispatch/call filter. 
Also, this /// trait is more flexible in terms of how it can be used: it is a `Parameter` and `Member`, so it diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs index 95e5954184b4b..cdd82a3124e63 100644 --- a/frame/support/src/traits/filter.rs +++ b/frame/support/src/traits/filter.rs @@ -180,17 +180,6 @@ macro_rules! impl_filter_stack { } } -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - #[cfg(test)] pub mod test_impl_filter_stack { use super::*; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 385db4e4d1ad9..f8e91886edf0c 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -17,8 +17,8 @@ //! Traits for hooking tasks to events in a blockchain's lifecycle. +use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; -use sp_arithmetic::traits::Saturating; use sp_runtime::traits::AtLeast32BitUnsigned; /// The block initialization trait. @@ -33,15 +33,17 @@ pub trait OnInitialize { /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, /// including inherent extrinsics. Hence for instance, if you runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { - 0 + fn on_initialize(_n: BlockNumber) -> Weight { + Weight::new() } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnInitialize for Tuple { - fn on_initialize(n: BlockNumber) -> crate::weights::Weight { - let mut weight = 0; + fn on_initialize(n: BlockNumber) -> Weight { + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); weight } @@ -50,7 +52,9 @@ impl OnInitialize for Tuple { /// The block finalization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is ending. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. /// @@ -71,22 +75,19 @@ pub trait OnIdle { /// /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - /// in a block are applied but before `on_finalize` is executed. 
- fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight, - ) -> crate::weights::Weight { - 0 + fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { + Weight::new() } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { - let on_idle_functions: &[fn( - BlockNumber, - crate::weights::Weight, - ) -> crate::weights::Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; - let mut weight = 0; + fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { + let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = + &[for_tuples!( #( Tuple::on_idle ),* )]; + let mut weight = Weight::new(); let len = on_idle_functions.len(); let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( @@ -105,7 +106,9 @@ impl OnIdle for Tuple { /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and /// will see the genesis state after all pallets have written their genesis state. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnGenesis { /// Something that should happen at genesis. fn on_genesis() {} @@ -166,8 +169,8 @@ pub trait OnRuntimeUpgrade { /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 + fn on_runtime_upgrade() -> Weight { + Weight::new() } /// Execute some pre-checks prior to a runtime upgrade. @@ -187,10 +190,12 @@ pub trait OnRuntimeUpgrade { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { - fn on_runtime_upgrade() -> crate::weights::Weight { - let mut weight = 0; + fn on_runtime_upgrade() -> Weight { + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } @@ -210,6 +215,19 @@ impl OnRuntimeUpgrade for Tuple { } } +/// Type that provides some integrity tests. +/// +/// This is implemented for modules by `decl_module`. +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in an externalities-provided environment. + fn integrity_test() {} +} + /// The pallet hooks trait. Implementing this lets you express some logic to execute. pub trait Hooks { /// The block is being finalized. Implement to have something happen. @@ -220,18 +238,15 @@ pub trait Hooks { /// Will not fire if the remaining weight is 0. 
/// Return the weight used, the hook will subtract it from current weight used /// and pass the result to the next `on_idle` hook if it exists. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight, - ) -> crate::weights::Weight { - 0 + fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { + Weight::new() } /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { - 0 + fn on_initialize(_n: BlockNumber) -> Weight { + Weight::new() } /// Perform a module upgrade. @@ -253,8 +268,8 @@ pub trait Hooks { /// pallet is discouraged and might get deprecated in the future. Alternatively, export the same /// logic as a free-function from your pallet, and pass it to `type Executive` from the /// top-level runtime. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 + fn on_runtime_upgrade() -> Weight { + Weight::new() } /// Execute some pre-checks prior to a runtime upgrade. @@ -321,7 +336,9 @@ pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeD } /// A trait which is called when the timestamp is set in the runtime. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnTimestampSet { /// Called when the timestamp is set. fn on_timestamp_set(moment: Moment); @@ -335,18 +352,18 @@ mod tests { fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { struct Test; impl OnInitialize for Test { - fn on_initialize(_n: u8) -> crate::weights::Weight { - 10 + fn on_initialize(_n: u8) -> Weight { + Weight::from_ref_time(10) } } impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> crate::weights::Weight { - 20 + fn on_runtime_upgrade() -> Weight { + Weight::from_ref_time(20) } } - assert_eq!(<(Test, Test)>::on_initialize(0), 20); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + assert_eq!(<(Test, Test)>::on_initialize(0), Weight::from_ref_time(20)); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), Weight::from_ref_time(40)); } #[test] @@ -358,48 +375,48 @@ mod tests { struct Test3; type TestTuple = (Test1, Test2, Test3); impl OnIdle for Test1 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test1"); } - 0 + Weight::zero() } } impl OnIdle for Test2 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test2"); } - 0 + Weight::zero() } } impl OnIdle for Test3 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test3"); } - 0 + Weight::zero() } } unsafe { - TestTuple::on_idle(0, 0); + TestTuple::on_idle(0, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(1, 0); + TestTuple::on_idle(1, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(2, 0); + TestTuple::on_idle(2, Weight::zero()); 
assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test3", "Test1", "Test2"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(3, 0); + TestTuple::on_idle(3, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(4, 0); + TestTuple::on_idle(4, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); } diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs index 8c69a2aaccb33..daf2d3aa6517d 100644 --- a/frame/support/src/traits/members.rs +++ b/frame/support/src/traits/members.rs @@ -17,6 +17,7 @@ //! Traits for dealing with the idea of membership. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. @@ -25,7 +26,9 @@ pub trait Contains { fn contains(t: &T) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl Contains for Tuple { fn contains(t: &T) -> bool { for_tuples!( #( @@ -41,7 +44,9 @@ pub trait ContainsPair { fn contains(a: &A, b: &B) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(0, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl ContainsPair for Tuple { fn contains(a: &A, b: &B) -> bool { for_tuples!( #( diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index d3dc57e1ee52d..b0dd5bd5160b4 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -18,6 +18,7 @@ //! Traits for managing information attached to pallets and their constituents. use codec::{Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; @@ -70,40 +71,22 @@ pub trait PalletsInfoAccess { /// /// You probably don't want this function but `infos()` instead. fn count() -> usize { - 0 + // for backwards compatibility with XCM-3, Mark is deprecated. + Self::infos().len() } - /// Extend the given vector by all of the pallets' information that this type represents. - /// - /// You probably don't want this function but `infos()` instead. - fn accumulate(_accumulator: &mut Vec) {} - /// All of the pallets' information that this type represents. - fn infos() -> Vec { - let mut result = Vec::with_capacity(Self::count()); - Self::accumulate(&mut result); - result - } + fn infos() -> Vec; } -impl PalletsInfoAccess for () {} -impl PalletsInfoAccess for (T,) { - fn count() -> usize { - T::count() - } - fn accumulate(acc: &mut Vec) { - T::accumulate(acc) - } -} - -impl PalletsInfoAccess for (T1, T2) { - fn count() -> usize { - T1::count() + T2::count() - } - fn accumulate(acc: &mut Vec) { - // The AllPallets type tuplises the pallets in reverse order, so we unreverse them here. 
- T2::accumulate(acc); - T1::accumulate(acc); +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PalletsInfoAccess for Tuple { + fn infos() -> Vec { + let mut res = vec![]; + for_tuples!( #( res.extend(Tuple::infos()); )* ); + res } } diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 044e5466872f6..9738036f0bd25 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,7 +18,8 @@ //! Smaller traits used in FRAME which don't need their own file. use crate::dispatch::Parameter; -use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; +use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; +use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; #[doc(hidden)] @@ -467,14 +468,18 @@ impl SameOrOther { } /// Handler for when a new account has been created. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnNewAccount { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnKilledAccount { /// The account with the given id was reaped. fn on_killed_account(who: &AccountId); @@ -632,7 +637,9 @@ impl PrivilegeCmp for EqualPrivilegeOnly { /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OffchainWorker { /// This function is being called after every block import (when fully synced). /// @@ -745,7 +752,10 @@ impl Encode for WrapperOpaque { impl Decode for WrapperOpaque { fn decode(input: &mut I) -> Result { - Ok(Self(T::decode_all(&mut &>::decode(input)?[..])?)) + Ok(Self(T::decode_all_with_depth_limit( + sp_api::MAX_EXTRINSIC_DEPTH, + &mut &>::decode(input)?[..], + )?)) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -763,7 +773,7 @@ impl MaxEncodedLen for WrapperOpaque { fn max_encoded_len() -> usize { let t_max_len = T::max_encoded_len(); - // See scale encoding https://docs.substrate.io/v3/advanced/scale-codec + // See scale encoding: https://docs.substrate.io/reference/scale-codec/ if t_max_len < 64 { t_max_len + 1 } else if t_max_len < 2usize.pow(14) { @@ -807,7 +817,7 @@ impl WrapperKeepOpaque { /// /// Returns `None` if the decoding failed. 
pub fn try_decode(&self) -> Option { - T::decode_all(&mut &self.data[..]).ok() + T::decode_all_with_depth_limit(sp_api::MAX_EXTRINSIC_DEPTH, &mut &self.data[..]).ok() } /// Returns the length of the encoded `T`. @@ -939,6 +949,30 @@ impl PreimageRecipient for () { #[cfg(test)] mod test { use super::*; + use sp_std::marker::PhantomData; + + #[derive(Encode, Decode)] + enum NestedType { + Nested(Box), + Done, + } + + #[test] + fn test_opaque_wrapper_decode_limit() { + let limit = sp_api::MAX_EXTRINSIC_DEPTH as usize; + let mut ok_bytes = vec![0u8; limit]; + ok_bytes.push(1u8); + let mut err_bytes = vec![0u8; limit + 1]; + err_bytes.push(1u8); + assert!(>::decode(&mut &ok_bytes.encode()[..]).is_ok()); + assert!(>::decode(&mut &err_bytes.encode()[..]).is_err()); + + let ok_keep_opaque = WrapperKeepOpaque { data: ok_bytes, _phantom: PhantomData }; + let err_keep_opaque = WrapperKeepOpaque { data: err_bytes, _phantom: PhantomData }; + + assert!(>::try_decode(&ok_keep_opaque).is_some()); + assert!(>::try_decode(&err_keep_opaque).is_none()); + } #[test] fn test_opaque_wrapper() { diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index e484140cc2fd9..d40d82c28e87e 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -17,6 +17,7 @@ //! Traits for encoding data related to pallet's storage items. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::prelude::*; /// An instance of a pallet in the storage. @@ -71,7 +72,9 @@ pub trait StorageInfoTrait { fn storage_info() -> Vec; } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl StorageInfoTrait for Tuple { fn storage_info() -> Vec { let mut res = vec![]; diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index fbbef6c31822f..294d0e89c8b9e 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -17,7 +17,7 @@ //! Miscellaneous types. -use codec::{Decode, Encode, FullCodec}; +use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; use sp_runtime::{ArithmeticError, DispatchError, TokenError}; @@ -116,7 +116,9 @@ pub enum ExistenceRequirement { } /// Status of funds. -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +#[derive( + PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] pub enum BalanceStatus { /// Funds are free, as corresponding to `free` item in Balances. Free, @@ -126,7 +128,7 @@ pub enum BalanceStatus { bitflags::bitflags! { /// Reasons for moving funds out of an account. - #[derive(Encode, Decode)] + #[derive(Encode, Decode, MaxEncodedLen)] pub struct WithdrawReasons: u8 { /// In order to pay for (system) transaction costs. const TRANSACTION_PAYMENT = 0b00000001; @@ -161,16 +163,29 @@ impl WithdrawReasons { } /// Simple amalgamation trait to collect together properties for an AssetId under one roof. 
-pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo {} -impl AssetId for T {} +pub trait AssetId: + FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo + MaxEncodedLen +{ +} +impl AssetId + for T +{ +} /// Simple amalgamation trait to collect together properties for a Balance under one roof. pub trait Balance: - AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo + AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo + MaxEncodedLen { } -impl Balance - for T +impl< + T: AtLeast32BitUnsigned + + FullCodec + + Copy + + Default + + Debug + + scale_info::TypeInfo + + MaxEncodedLen, + > Balance for T { } diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index b5e7d27073895..49ae3163d0cd1 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -106,6 +106,19 @@ pub trait VoteTally { fn rejection(class: Class) -> Self; #[cfg(feature = "runtime-benchmarks")] fn from_requirements(support: Perbill, approval: Perbill, class: Class) -> Self; + #[cfg(feature = "runtime-benchmarks")] + /// A function that should be called before any use of the `runtime-benchmarks` gated functions + /// of the `VoteTally` trait. + /// + /// Should be used to set up any needed state in a Pallet which implements `VoteTally` so that + /// benchmarks that execute will complete successfully. `class` can be used to set up a + /// particular class of voters, and `granularity` is used to determine the weight of one vote + /// relative to total unanimity. + /// + /// For example, in the case where there are a number of unique voters, and each voter has equal + /// voting weight, a granularity of `Perbill::from_rational(1, 1000)` should create `1_000` + /// users. + fn setup(class: Class, granularity: Perbill); } pub enum PollStatus { None, diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 24240c7cc4e6a..568cb5535ccda 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -91,27 +91,19 @@ //! # fn main() {} //! ``` //! -//! ### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. +//! ### 2. Define weights as a function of input arguments. //! -//! This struct works in a similar manner as above. 3 items must be provided and each can be either -//! a fixed value or a function/closure with the same parameters list as the dispatchable function -//! itself, wrapper in a tuple. -//! -//! Using this only makes sense if you want to use a function for at least one of the elements. If -//! all 3 are static values, providing a raw tuple is easier. +//! The arguments of the dispatch are available in the weight expressions as a borrowed value. //! //! ``` //! # use frame_system::Config; -//! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; +//! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = FunctionOf( -//! // weight, function. -//! |args: (&u32, &u64)| *args.0 as u64 + args.1, -//! // class, fixed. +//! #[weight = ( +//! *a as u64 + *b, //! DispatchClass::Operational, -//! // pays fee, function. -//! |args: (&u32, &u64)| if *args.0 > 1000 { Pays::Yes } else { Pays::No }, +//! if *a > 1000 { Pays::Yes } else { Pays::No } //! )] //! fn dispatching(origin, a: u32, b: u64) { unimplemented!() } //! 
} @@ -131,12 +123,13 @@ mod block_weights; mod extrinsic_weights; mod paritydb_weights; mod rocksdb_weights; +mod weight_v2; use crate::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, traits::Get, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -154,18 +147,17 @@ use sp_runtime::{ /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; -/// Numeric range of a transaction weight. -pub type Weight = u64; +pub use weight_v2::*; /// These constants are specific to FRAME, and the current implementation of its various components. /// For example: FRAME System, FRAME Executive, our FRAME support libraries, etc... pub mod constants { use super::Weight; - pub const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000; - pub const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000 - pub const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000 - pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 + pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); + pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); + pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); + pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); // Expose the Block and Extrinsic base weights. pub use super::{block_weights::BlockExecutionWeight, extrinsic_weights::ExtrinsicBaseWeight}; @@ -212,6 +204,12 @@ impl Default for Pays { } } +impl From<Pays> for PostDispatchInfo { + fn from(pays_fee: Pays) -> Self { + Self { actual_weight: None, pays_fee } + } +} + /// A generalized group of dispatch types. /// /// NOTE whenever upgrading the enum make sure to also update @@ -358,23 +356,13 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (actual_weight, pays_fee) = post_weight_info; - Self { actual_weight, pays_fee } - } -} - -impl From for PostDispatchInfo { - fn from(pays_fee: Pays) -> Self { - Self { actual_weight: None, pays_fee } - } -} - -impl From> for PostDispatchInfo { - fn from(actual_weight: Option) -> Self { - Self { actual_weight, pays_fee: Default::default() } +/// Extract the actual pays_fee from a dispatch result, if any, or fall back to the `pays_fee` of the dispatch info. 
+pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, } + .pays_fee(info) } impl From<()> for PostDispatchInfo { @@ -406,7 +394,7 @@ pub trait WithPostDispatchInfo { /// # Example /// /// ```ignore - /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100)))?; /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); /// ``` fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; @@ -427,147 +415,6 @@ where } } -impl WeighData for Weight { - fn weigh_data(&self, _: T) -> Weight { - *self - } -} - -impl ClassifyDispatch for Weight { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for Weight { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, DispatchClass, Pays) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (Weight, DispatchClass) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, Pays) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (Weight, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - -/// A struct to represent a weight which is a function of the input arguments. The given items have -/// the following types: -/// -/// - `WD`: a raw `Weight` value or a closure that returns a `Weight` with the same argument list as -/// the dispatched, wrapped in a tuple. -/// - `CD`: a raw `DispatchClass` value or a closure that returns a `DispatchClass` with the same -/// argument list as the dispatched, wrapped in a tuple. -/// - `PF`: a `Pays` variant for whether this dispatch pays fee or not or a closure that returns a -/// `Pays` variant with the same argument list as the dispatched, wrapped in a tuple. 
-#[deprecated = "Function arguments are available directly inside the annotation now."] -pub struct FunctionOf(pub WD, pub CD, pub PF); - -// `WeighData` as a raw value -#[allow(deprecated)] -impl WeighData for FunctionOf { - fn weigh_data(&self, _: Args) -> Weight { - self.0 - } -} - -// `WeighData` as a closure -#[allow(deprecated)] -impl WeighData for FunctionOf -where - WD: Fn(Args) -> Weight, -{ - fn weigh_data(&self, args: Args) -> Weight { - (self.0)(args) - } -} - -// `ClassifyDispatch` as a raw value -#[allow(deprecated)] -impl ClassifyDispatch for FunctionOf { - fn classify_dispatch(&self, _: Args) -> DispatchClass { - self.1 - } -} - -// `ClassifyDispatch` as a raw value -#[allow(deprecated)] -impl ClassifyDispatch for FunctionOf -where - CD: Fn(Args) -> DispatchClass, -{ - fn classify_dispatch(&self, args: Args) -> DispatchClass { - (self.1)(args) - } -} - -// `PaysFee` as a raw value -#[allow(deprecated)] -impl PaysFee for FunctionOf { - fn pays_fee(&self, _: Args) -> Pays { - self.2 - } -} - -// `PaysFee` as a closure -#[allow(deprecated)] -impl PaysFee for FunctionOf -where - PF: Fn(Args) -> Pays, -{ - fn pays_fee(&self, args: Args) -> Pays { - (self.2)(args) - } -} - /// Implementation for unchecked extrinsic. impl GetDispatchInfo for UncheckedExtrinsic @@ -595,30 +442,37 @@ where impl GetDispatchInfo for sp_runtime::testing::TestXt { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. - DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::Yes, ..Default::default() } + DispatchInfo { + weight: Weight::from_ref_time(self.encode().len() as _), + pays_fee: Pays::Yes, + ..Default::default() + } } } /// The weight of database operations that the runtime can invoke. +/// +/// NOTE: This is currently only measured in computational time, and will probably +/// be updated all together once proof size is accounted for. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct RuntimeDbWeight { - pub read: Weight, - pub write: Weight, + pub read: RefTimeWeight, + pub write: RefTimeWeight, } impl RuntimeDbWeight { - pub fn reads(self, r: Weight) -> Weight { - self.read.saturating_mul(r) + pub fn reads(self, r: u64) -> Weight { + Weight::from_ref_time(self.read.saturating_mul(r)) } - pub fn writes(self, w: Weight) -> Weight { - self.write.saturating_mul(w) + pub fn writes(self, w: u64) -> Weight { + Weight::from_ref_time(self.write.saturating_mul(w)) } - pub fn reads_writes(self, r: Weight, w: Weight) -> Weight { + pub fn reads_writes(self, r: u64, w: u64) -> Weight { let read_weight = self.read.saturating_mul(r); let write_weight = self.write.saturating_mul(w); - read_weight.saturating_add(write_weight) + Weight::from_ref_time(read_weight.saturating_add(write_weight)) } } @@ -686,7 +540,8 @@ where Self::polynomial() .iter() .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); + let w = Self::Balance::saturated_from(weight.ref_time()) + .saturating_pow(args.degree.into()); // The sum could get negative. Therefore we only sum with the accumulator. // The Perbill Mul implementation is non overflowing. 
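Reviewer's aside, not part of the patch: the mechanical rewrites in this file all follow from `Weight` becoming an opaque struct (re-exported from the new `weight_v2` module) rather than a `u64` alias. A minimal sketch using only APIs that appear in this diff:

    let base = Weight::from_ref_time(10_000); // was: let base: Weight = 10_000;
    let total = base.saturating_add(Weight::from_ref_time(2_500));
    assert_eq!(total.ref_time(), 12_500); // the raw u64 must now be read out explicitly
    assert!(total.checked_add(&Weight::zero()).is_some()); // checked ops now take a reference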
@@ -716,7 +571,7 @@ where type Balance = T; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) } } @@ -739,12 +594,12 @@ where type Balance = T; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight).saturating_mul(M::get()) + Self::Balance::saturated_from(weight.ref_time()).saturating_mul(M::get()) } } /// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { /// Value for `Normal` extrinsics. normal: T, @@ -795,7 +650,7 @@ impl PerDispatchClass { impl PerDispatchClass { /// Returns the total weight consumed by all extrinsics in the block. pub fn total(&self) -> Weight { - let mut sum = 0; + let mut sum = Weight::new(); for class in DispatchClass::all() { sum = sum.saturating_add(*self.get(*class)); } @@ -812,7 +667,7 @@ impl PerDispatchClass { /// occur. pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; + *value = value.checked_add(&weight).ok_or(())?; Ok(()) } @@ -872,16 +727,16 @@ mod tests { fn f03(_origin) { unimplemented!(); } // weight = a x 10 + b - #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + #[weight = ((_a * 10 + _eb * 1) as RefTimeWeight, DispatchClass::Normal, Pays::Yes)] fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } #[weight = (0, DispatchClass::Operational, Pays::Yes)] fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_ref_time(10_000)] fn f20(_origin) { unimplemented!(); } - #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_ref_time(40_000)] fn f21(_origin) { unimplemented!(); } } @@ -891,71 +746,121 @@ mod tests { fn weights_are_correct() { // #[weight = 1000] let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, DispatchClass::Mandatory)] let info = Call::::f01 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, Pays::No)] let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); // #[weight = (1000, DispatchClass::Operational, Pays::No)] let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, 150); // 13*10 + 20 + assert_eq!(info.weight, Weight::from_ref_time(150)); // 13*10 + 20 
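// i.e. the first tuple element of f11's annotation evaluates to
// _a * 10 + _eb * 1 = 13 * 10 + 20 * 1 = 150; the explicit `DispatchClass::Normal`
// and `Pays::Yes` from the same tuple are checked next.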
assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (0, DispatchClass::Operational, Pays::Yes)] let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, 0); + assert_eq!(info.weight, Weight::from_ref_time(0)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, 12300); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.weight, Weight::from_ref_time(12300)); // 100*3 + 1000*2 + 10_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, 45600); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.weight, Weight::from_ref_time(45600)); // 100*6 + 1000*5 + 40_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); } #[test] fn extract_actual_weight_works() { - let pre = DispatchInfo { weight: 1000, ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), 7); - assert_eq!(extract_actual_weight(&Ok(Some(1000).into()), &pre), 1000); - assert_eq!(extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), 9); + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_ref_time(7)); + assert_eq!( + extract_actual_weight(&Ok(Some(1000).into()), &pre), + Weight::from_ref_time(1000) + ); + assert_eq!( + extract_actual_weight( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), + Weight::from_ref_time(9) + ); } #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: 1000, ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(1250).into()), &pre), 1000); + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!( + extract_actual_weight(&Ok(Some(1250).into()), &pre), + Weight::from_ref_time(1000) + ); assert_eq!( - extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(1300)), &pre), - 1000 + extract_actual_weight( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(1300))), + &pre + ), + Weight::from_ref_time(1000), ); } + #[test] + fn extract_actual_pays_fee_works() { + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, + error: DispatchError::BadOrigin, + }), + &pre + ), + Pays::No + ); + + let pre = DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::No, + ..Default::default() + }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); + 
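// With `pays_fee: Pays::No` already set in the pre-dispatch info, the extracted
// value stays `Pays::No` regardless of what the post-dispatch info reports:
// combining the two flags can only withdraw fee payment, never re-enable it.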
assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); + } + type Balance = u64; // 0.5x^3 + 2.333x^2 + 7x - 10_000 @@ -996,40 +901,52 @@ mod tests { #[test] fn polynomial_works() { // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 - assert_eq!(Poly::weight_to_fee(&100), 514033); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(100)), 514033); // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 - assert_eq!(Poly::weight_to_fee(&10_123), 518917034928); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10_123)), 518917034928); } #[test] fn polynomial_does_not_underflow() { - assert_eq!(Poly::weight_to_fee(&0), 0); - assert_eq!(Poly::weight_to_fee(&10), 0); + assert_eq!(Poly::weight_to_fee(&Weight::zero()), 0); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10)), 0); } #[test] fn polynomial_does_not_overflow() { - assert_eq!(Poly::weight_to_fee(&Weight::max_value()), Balance::max_value() - 10_000); + assert_eq!(Poly::weight_to_fee(&Weight::MAX), Balance::max_value() - 10_000); } #[test] fn identity_fee_works() { - assert_eq!(IdentityFee::::weight_to_fee(&0), 0); - assert_eq!(IdentityFee::::weight_to_fee(&50), 50); - assert_eq!( - IdentityFee::::weight_to_fee(&Weight::max_value()), - Balance::max_value() - ); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::zero()), 0); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::from_ref_time(50)), 50); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::MAX), Balance::max_value()); } #[test] fn constant_fee_works() { use crate::traits::ConstU128; - assert_eq!(ConstantMultiplier::>::weight_to_fee(&0), 0); - assert_eq!(ConstantMultiplier::>::weight_to_fee(&50), 500); - assert_eq!(ConstantMultiplier::>::weight_to_fee(&16), 16384); assert_eq!( - ConstantMultiplier::>::weight_to_fee(&2), + ConstantMultiplier::>::weight_to_fee(&Weight::zero()), + 0 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 50 + )), + 500 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 16 + )), + 16384 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee( + &Weight::from_ref_time(2) + ), u128::MAX ); } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index b86334514af2f..93a80d12db96b 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -53,7 +53,7 @@ parameter_types! { /// 99th: 5_489_273 /// 95th: 5_433_314 /// 75th: 5_354_812 - pub const BlockExecutionWeight: Weight = 5_346_284 * WEIGHT_PER_NANOS; + pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(5_346_284); } #[cfg(test)] diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index b8a52c164d8fe..d223eb7c10efc 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -53,7 +53,7 @@ parameter_types! 
{ /// 99th: 86_924 /// 95th: 86_828 /// 75th: 86_347 - pub const ExtrinsicBaseWeight: Weight = 86_298 * WEIGHT_PER_NANOS; + pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(86_298); } #[cfg(test)] diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index 572187ba78a92..f498991729d7a 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -25,8 +25,8 @@ pub mod constants { /// ParityDB can be enabled with a feature flag, but is still experimental. These weights /// are available for brave runtime engineers who may want to try this out as default. pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_PER_NANOS, - write: 50_000 * constants::WEIGHT_PER_NANOS, + read: 8_000 * constants::WEIGHT_PER_NANOS.ref_time(), + write: 50_000 * constants::WEIGHT_PER_NANOS.ref_time(), }; } diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index f37964dcbd825..67571c4723f94 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -25,8 +25,8 @@ pub mod constants { /// By default, Substrate uses RocksDB, so this will be the weight used throughout /// the runtime. pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_PER_NANOS, - write: 100_000 * constants::WEIGHT_PER_NANOS, + read: 25_000 * constants::WEIGHT_PER_NANOS.ref_time(), + write: 100_000 * constants::WEIGHT_PER_NANOS.ref_time(), }; } diff --git a/frame/support/src/weights/weight_v2.rs b/frame/support/src/weights/weight_v2.rs new file mode 100644 index 0000000000000..d02aac3268838 --- /dev/null +++ b/frame/support/src/weights/weight_v2.rs @@ -0,0 +1,483 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; +use sp_runtime::{ + traits::{Bounded, CheckedAdd, CheckedSub, One, Zero}, + Perquintill, RuntimeDebug, +}; + +use super::*; + +/// The unit of measurement for computational time spent when executing runtime logic on reference +/// hardware. +pub type RefTimeWeight = u64; + +#[derive( + Encode, + Decode, + MaxEncodedLen, + TypeInfo, + Eq, + PartialEq, + Copy, + Clone, + RuntimeDebug, + Default, + Ord, + PartialOrd, + CompactAs, +)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct Weight { + /// The weight of computational time used based on some reference hardware. + ref_time: RefTimeWeight, +} + +impl Weight { + /// Create a new Weight with zero. + pub const fn new() -> Self { + Self { ref_time: 0 } + } + + /// Set the reference time part of the weight. 
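// For illustration: `Weight::new().set_ref_time(10)` is equivalent to
// `Weight::from_ref_time(10)`; the compatibility shims near the end of this file
// rely on exactly this builder pattern.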
+    pub const fn set_ref_time(mut self, c: RefTimeWeight) -> Self {
+        self.ref_time = c;
+        self
+    }
+
+    /// Return the reference time part of the weight.
+    pub const fn ref_time(&self) -> RefTimeWeight {
+        self.ref_time
+    }
+
+    /// Return a mutable reference to the time part of the weight.
+    pub fn ref_time_mut(&mut self) -> &mut RefTimeWeight {
+        &mut self.ref_time
+    }
+
+    pub const MAX: Self = Self { ref_time: RefTimeWeight::MAX };
+
+    /// Get the conservative min of `self` and `other` weight.
+    pub fn min(&self, other: Self) -> Self {
+        Self { ref_time: self.ref_time.min(other.ref_time) }
+    }
+
+    /// Get the aggressive max of `self` and `other` weight.
+    pub fn max(&self, other: Self) -> Self {
+        Self { ref_time: self.ref_time.max(other.ref_time) }
+    }
+
+    /// Try to add some `other` weight while upholding the `limit`.
+    pub fn try_add(&self, other: &Self, limit: &Self) -> Option<Self> {
+        let total = self.checked_add(other)?;
+        if total.ref_time > limit.ref_time {
+            None
+        } else {
+            Some(total)
+        }
+    }
+
+    /// Construct with reference time weight.
+    pub const fn from_ref_time(ref_time: RefTimeWeight) -> Self {
+        Self { ref_time }
+    }
+
+    pub fn checked_mul(self, rhs: u64) -> Option<Self> {
+        let ref_time = self.ref_time.checked_mul(rhs)?;
+        Some(Self { ref_time })
+    }
+
+    pub fn checked_div(self, rhs: u64) -> Option<Self> {
+        let ref_time = self.ref_time.checked_div(rhs)?;
+        Some(Self { ref_time })
+    }
+
+    pub const fn scalar_saturating_mul(self, rhs: u64) -> Self {
+        Self { ref_time: self.ref_time.saturating_mul(rhs) }
+    }
+
+    pub const fn scalar_div(self, rhs: u64) -> Self {
+        Self { ref_time: self.ref_time / rhs }
+    }
+}
+
+impl Zero for Weight {
+    fn zero() -> Self {
+        Self::zero()
+    }
+
+    fn is_zero(&self) -> bool {
+        self.ref_time == 0
+    }
+}
+
+impl One for Weight {
+    fn one() -> Self {
+        Self::one()
+    }
+}
+
+impl Add for Weight {
+    type Output = Self;
+    fn add(self, rhs: Self) -> Self {
+        Self { ref_time: self.ref_time + rhs.ref_time }
+    }
+}
+
+impl Sub for Weight {
+    type Output = Self;
+    fn sub(self, rhs: Self) -> Self {
+        Self { ref_time: self.ref_time - rhs.ref_time }
+    }
+}
+
+impl Mul for Weight {
+    type Output = Self;
+    fn mul(self, b: Self) -> Self {
+        Self { ref_time: b.ref_time * self.ref_time }
+    }
+}
+
+impl<T> Mul<T> for Weight
+where
+    T: Mul<u64, Output = u64> + Copy,
+{
+    type Output = Self;
+    fn mul(self, b: T) -> Self {
+        Self { ref_time: b * self.ref_time }
+    }
+}
+
+impl Mul<Weight> for Perbill {
+    type Output = Weight;
+    fn mul(self, b: Weight) -> Weight {
+        Weight { ref_time: self * b.ref_time }
+    }
+}
+
+impl Mul<Weight> for Perquintill {
+    type Output = Weight;
+    fn mul(self, b: Weight) -> Weight {
+        Weight { ref_time: self * b.ref_time }
+    }
+}
+
+impl Mul<Weight> for u64 {
+    type Output = Weight;
+    fn mul(self, b: Weight) -> Weight {
+        Weight { ref_time: self * b.ref_time }
+    }
+}
+
+impl<T> Div<T> for Weight
+where
+    u64: Div<T, Output = u64>,
+    T: Copy,
+{
+    type Output = Self;
+    fn div(self, b: T) -> Self {
+        Self { ref_time: self.ref_time / b }
+    }
+}
+
+impl Saturating for Weight {
+    fn saturating_add(self, rhs: Self) -> Self {
+        self.saturating_add(rhs)
+    }
+
+    fn saturating_sub(self, rhs: Self) -> Self {
+        self.saturating_sub(rhs)
+    }
+
+    fn saturating_mul(self, rhs: Self) -> Self {
+        self.saturating_mul(rhs)
+    }
+
+    fn saturating_pow(self, exp: usize) -> Self {
+        self.saturating_pow(exp)
+    }
+}
+
+impl CheckedAdd for Weight {
+    fn checked_add(&self, rhs: &Self) -> Option<Self> {
+        self.checked_add(rhs)
+    }
+}
+
+impl CheckedSub for Weight {
+    fn checked_sub(&self, rhs: &Self) -> Option<Self> {
+        self.checked_sub(rhs)
+    }
+}
+
+impl<T> PaysFee<T> for 
(Weight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (Weight, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl WeighData for (Weight, DispatchClass, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (Weight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (Weight, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (Weight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (actual_weight, pays_fee) = post_weight_info; + Self { actual_weight, pays_fee } + } +} + +impl From> for PostDispatchInfo { + fn from(actual_weight: Option) -> Self { + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl WeighData for Weight { + fn weigh_data(&self, _: T) -> Weight { + return *self + } +} + +impl ClassifyDispatch for Weight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for Weight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl ClassifyDispatch for (Weight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl core::fmt::Display for Weight { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "Weight(ref_time: {})", self.ref_time) + } +} + +impl Bounded for Weight { + fn min_value() -> Self { + Zero::zero() + } + fn max_value() -> Self { + Self::MAX + } +} + +impl AddAssign for Weight { + fn add_assign(&mut self, other: Self) { + *self = Self { ref_time: self.ref_time + other.ref_time }; + } +} + +impl SubAssign for Weight { + fn sub_assign(&mut self, other: Self) { + *self = Self { ref_time: self.ref_time - other.ref_time }; + } +} + +impl sp_runtime::traits::Printable for Weight { + fn print(&self) { + self.ref_time().print() + } +} + +// Re-export common functions so you do not need to import trait. 
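// e.g. thanks to the inherent methods below, callers can write `a.saturating_add(b)`
// on two `Weight` values without `use sp_runtime::traits::Saturating;` in scope;
// the `Saturating`/`CheckedAdd`/`CheckedSub` trait impls earlier in this file simply
// forward to these inherent versions so generic code keeps working.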
+impl Weight { + pub const fn zero() -> Self { + Self { ref_time: 0 } + } + + pub const fn one() -> Self { + Self { ref_time: 1 } + } + + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + } + + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + } + + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_mul(rhs.ref_time) } + } + + pub const fn saturating_pow(self, exp: usize) -> Self { + Self { ref_time: self.ref_time.saturating_pow(exp as u32) } + } + + pub const fn checked_add(&self, rhs: &Self) -> Option { + match self.ref_time.checked_add(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + pub const fn checked_sub(&self, rhs: &Self) -> Option { + match self.ref_time.checked_sub(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } +} + +// TODO: Eventually remove these + +impl From> for PostDispatchInfo { + fn from(maybe_actual_computation: Option) -> Self { + let actual_weight = match maybe_actual_computation { + Some(actual_computation) => Some(Weight::new().set_ref_time(actual_computation)), + None => None, + }; + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (maybe_actual_time, pays_fee) = post_weight_info; + let actual_weight = match maybe_actual_time { + Some(actual_time) => Some(Weight::new().set_ref_time(actual_time)), + None => None, + }; + Self { actual_weight, pays_fee } + } +} + +impl WeighData for RefTimeWeight { + fn weigh_data(&self, _: T) -> Weight { + return Weight::new().set_ref_time(*self) + } +} + +impl ClassifyDispatch for RefTimeWeight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for RefTimeWeight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (RefTimeWeight, DispatchClass, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (RefTimeWeight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (RefTimeWeight, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (RefTimeWeight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (RefTimeWeight, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (RefTimeWeight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +// END TODO diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 63747a9d560dc..aed98579a0fd8 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -282,94 +282,96 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic::Root.into()), - 
Err(DispatchError::Module(ModuleError { - index: 31, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 32, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 33, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - NestedModule3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 34, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 6, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_4::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 3, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_5::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 4, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_6::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 1, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_7::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 2, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_8::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 12, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_9::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 13, - error: [0; 4], - message: Some("Something") - })), - ); + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( + Module1_1::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + 
Module1_8::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); + }); } #[test] @@ -501,17 +503,25 @@ fn call_encode_is_correct_and_decode_works() { fn call_weight_should_attach_to_call_enum() { use frame_support::{ dispatch::{DispatchInfo, GetDispatchInfo}, - weights::{DispatchClass, Pays}, + weights::{DispatchClass, Pays, Weight}, }; // operational. assert_eq!( module3::Call::::operational {}.get_dispatch_info(), - DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, ); // custom basic assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, ); } diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs new file mode 100644 index 0000000000000..79b5632babd95 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs @@ -0,0 +1,14 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + #[cfg(test)] + System: frame_system::{Pallet, Call, Storage, Config, Event}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr new file mode 100644 index 0000000000000..a86a839615aa0 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr @@ -0,0 +1,5 @@ +error: `System` pallet declaration is feature gated, please remove any `#[cfg]` attributes + --> tests/construct_runtime_ui/feature_gated_system_pallet.rs:10:3 + | +10 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs new file mode 100644 index 0000000000000..a1d39fa76ea85 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[cfg(feature = 1)] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr new file mode 100644 index 0000000000000..68366a3410bf1 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr @@ -0,0 +1,6 @@ +error: feature = 1 + ^ expected one of ``, `all`, `any`, `not` here + --> tests/construct_runtime_ui/invalid_meta_literal.rs:10:3 + | +10 | #[cfg(feature = 1)] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 404c0c3627b7b..1f08ab87c1f79 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -29,6 +29,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +40 - construct_runtime! { +41 - pub enum Runtime where +42 - Block = Block, +43 - NodeBlock = Block, +44 - UncheckedExtrinsic = UncheckedExtrinsic +45 - { +46 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +47 - Pallet: test_pallet::{Pallet, Config}, +48 - } +49 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 31229f8c93cb6..2af4d3fb15000 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -32,6 +32,19 @@ help: consider importing this enum | 1 | use frame_system::Event; | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | error[E0412]: cannot find type `Event` in module `pallet` --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 @@ -52,3 +65,16 @@ help: consider importing one of these items | 1 | use frame_system::Event; | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! 
{ +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index fa1cee1ac7e2f..1bc109a45ac57 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -32,6 +32,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Config}, +57 - } +58 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 06e845618d44f..c692cd61bae8b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -32,6 +32,19 @@ help: consider importing this type alias | 1 | use frame_system::Origin; | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 @@ -52,6 +65,19 @@ help: consider importing one of these items | 1 | use frame_system::Origin; | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | error[E0282]: type annotations needed --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs new file mode 100644 index 0000000000000..b93adf9a780a7 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[cfg(feature(test))] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr new file mode 100644 index 0000000000000..98d99a0d34997 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr @@ -0,0 +1,6 @@ +error: feature(test) + ^ expected one of `=`, `,`, `)` here + --> tests/construct_runtime_ui/unsupported_meta_structure.rs:10:3 + | +10 | #[cfg(feature(test))] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs new file mode 100644 index 0000000000000..3ec8b9db1d435 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[attr] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr new file mode 100644 index 0000000000000..fceb2b8a99db8 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr @@ -0,0 +1,5 @@ +error: Unsupported attribute, only #[cfg] is supported on pallet declarations in `construct_runtime` + --> tests/construct_runtime_ui/unsupported_pallet_attr.rs:10:3 + | +10 | #[attr] + | ^ diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 86c427d8080be..880695d9b77e2 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -1,5 +1,5 @@ error: `integrity_test` can only be passed once as input. - --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { 2 | | pub struct Module for enum Call where origin: T::Origin, system=self { @@ -13,13 +13,7 @@ error: `integrity_test` can only be passed once as input. = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` - --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:7:2 | -1 | / frame_support::decl_module! 
{ -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { -3 | | fn integrity_test() {} -4 | | -5 | | fn integrity_test() {} -6 | | } -7 | | } - | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs` +7 | } + | ^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs` diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index be5d70be17f69..7ce43cd5d44d1 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -797,7 +797,7 @@ mod test_append_and_len { TestExternalities::default().execute_with(|| { let key = JustVec::hashed_key(); // Set it to some invalid value. - frame_support::storage::unhashed::put_raw(&key, &*b"1"); + frame_support::storage::unhashed::put_raw(&key, b"1"); assert!(JustVec::get().is_empty()); assert_eq!(frame_support::storage::unhashed::get_raw(&key), Some(b"1".to_vec())); diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr index 64f844e547be0..1c230db376a49 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr @@ -1,7 +1,5 @@ error[E0369]: binary operation `==` cannot be applied to type `::C` - --> $DIR/partial_eq.rs:7:2 + --> tests/derive_no_bound_ui/partial_eq.rs:7:2 | 7 | c: T::C, | ^ - | - = note: the trait `std::cmp::PartialEq` is not implemented for `::C` diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 6b72327eb4989..907a52f1184cc 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -23,7 +23,7 @@ use frame_support::{ ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight, Weight}, }; use scale_info::{meta_type, TypeInfo}; use sp_io::{ @@ -165,7 +165,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_: BlockNumberFor) { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -176,7 +176,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(30)); - 30 + Weight::from_ref_time(30) } fn integrity_test() { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -190,7 +190,7 @@ pub mod pallet { T::AccountId: From + From + SomeAssociation1, { /// Doc comment put in metadata - #[pallet::weight(Weight::from(*_foo))] + #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -229,6 +229,7 @@ pub mod pallet { pub enum Error { /// doc comment put into metadata InsufficientProposersBalance, + NonExistentStorageValue, Code(u8), #[codec(skip)] Skipped(u128), @@ -282,6 +283,10 @@ pub mod pallet { pub type Map2 = StorageMap>; + #[pallet::storage] + pub type Map3 = + StorageMap<_, Blake2_128Concat, u32, u64, ResultQuery::NonExistentStorageValue>>; + 
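// With `ResultQuery`, reading a missing key returns
// `Err(Error::<T>::NonExistentStorageValue)` instead of a silent default value;
// the additions to `storage_expand` below exercise exactly this for `Map3`.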
#[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -295,6 +300,17 @@ pub mod pallet { MaxValues = ConstU32<5>, >; + #[pallet::storage] + pub type DoubleMap3 = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -307,6 +323,15 @@ pub mod pallet { MaxValues = ConstU32<11>, >; + #[pallet::storage] + #[pallet::getter(fn nmap3)] + pub type NMap3 = StorageNMap< + _, + (NMapKey, NMapKey), + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn conditional_value)] #[cfg(feature = "conditional-storage")] @@ -467,14 +492,14 @@ pub mod pallet2 { { fn on_initialize(_: BlockNumberFor) -> Weight { Self::deposit_event(Event::Something(11)); - 0 + Weight::zero() } fn on_finalize(_: BlockNumberFor) { Self::deposit_event(Event::Something(21)); } fn on_runtime_upgrade() -> Weight { Self::deposit_event(Event::Something(31)); - 0 + Weight::zero() } } @@ -601,6 +626,8 @@ frame_support::construct_runtime!( System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, Example2: pallet2 exclude_parts { Call }, + #[cfg(feature = "example3")] + Example3: pallet3, Example4: pallet4 use_parts { Call }, } ); @@ -641,7 +668,11 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3, bar: 0 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: frame_support::weights::Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -934,6 +965,16 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::Map3::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + pallet::Map3::::get(2), + Err(pallet::Error::::NonExistentStorageValue), + ); + pallet::DoubleMap::::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -948,6 +989,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::DoubleMap3::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + pallet::DoubleMap3::::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + pallet::NMap::::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -961,6 +1013,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::NMap3::::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + 
pallet::NMap3::::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); + #[cfg(feature = "conditional-storage")] { pallet::ConditionalValue::::put(1); @@ -987,10 +1050,10 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 10); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(10)); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 30); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(30)); assert_eq!( frame_system::Pallet::::events()[0].event, @@ -1026,10 +1089,16 @@ fn all_pallets_type_reversed_order_is_correct() { #[allow(deprecated)] { - assert_eq!(AllPalletsWithoutSystemReversed::on_initialize(1), 10); + assert_eq!( + AllPalletsWithoutSystemReversed::on_initialize(1), + Weight::from_ref_time(10) + ); AllPalletsWithoutSystemReversed::on_finalize(1); - assert_eq!(AllPalletsWithoutSystemReversed::on_runtime_upgrade(), 30); + assert_eq!( + AllPalletsWithoutSystemReversed::on_runtime_upgrade(), + Weight::from_ref_time(30) + ); } assert_eq!( @@ -1096,7 +1165,7 @@ fn migrate_from_pallet_version_to_storage_version() { >(&db_weight); // 4 pallets, 2 writes and every write costs 5 weight. - assert_eq!(4 * 2 * 5, weight); + assert_eq!(Weight::from_ref_time(4 * 2 * 5), weight); // All pallet versions should be removed assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); @@ -1171,6 +1240,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "Map3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -1199,6 +1279,20 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "DoubleMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u32, u64)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -1224,6 +1318,20 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "NMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![1, 1], + docs: vec![], + }, #[cfg(feature = "conditional-storage")] StorageEntryMetadata { name: "ConditionalValue", @@ -1436,6 +1544,8 @@ fn test_storage_info() { traits::{StorageInfo, StorageInfoTrait}, }; + // Storage max size is calculated by adding up all the hasher size, the key type size and the + // value type size assert_eq!( Example::storage_info(), vec![ @@ -1465,42 +1575,63 @@ fn test_storage_info() { storage_name: b"Map".to_vec(), prefix: prefix(b"Example", b"Map").to_vec(), max_values: None, - max_size: Some(3 + 16), + max_size: Some(16 + 1 + 2), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"Map2".to_vec(), prefix: prefix(b"Example", b"Map2").to_vec(), max_values: Some(3), - max_size: Some(6 + 8), + 
max_size: Some(8 + 2 + 4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Map3".to_vec(), + prefix: prefix(b"Example", b"Map3").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 8), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"DoubleMap".to_vec(), prefix: prefix(b"Example", b"DoubleMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"DoubleMap2".to_vec(), prefix: prefix(b"Example", b"DoubleMap2").to_vec(), max_values: Some(5), - max_size: Some(14 + 8 + 16), + max_size: Some(8 + 2 + 16 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"DoubleMap3".to_vec(), + prefix: prefix(b"Example", b"DoubleMap3").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 8 + 8 + 16), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"NMap".to_vec(), prefix: prefix(b"Example", b"NMap").to_vec(), max_values: None, - max_size: Some(5 + 16), + max_size: Some(16 + 1 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"NMap2".to_vec(), prefix: prefix(b"Example", b"NMap2").to_vec(), max_values: Some(11), - max_size: Some(14 + 8 + 16), + max_size: Some(8 + 2 + 16 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"NMap3".to_vec(), + prefix: prefix(b"Example", b"NMap3").to_vec(), + max_values: None, + max_size: Some(16 + 1 + 8 + 2 + 16), }, #[cfg(feature = "conditional-storage")] { @@ -1519,7 +1650,7 @@ fn test_storage_info() { storage_name: b"ConditionalMap".to_vec(), prefix: prefix(b"Example", b"ConditionalMap").to_vec(), max_values: Some(12), - max_size: Some(6 + 8), + max_size: Some(8 + 2 + 4), } }, #[cfg(feature = "conditional-storage")] @@ -1529,7 +1660,7 @@ fn test_storage_info() { storage_name: b"ConditionalDoubleMap".to_vec(), prefix: prefix(b"Example", b"ConditionalDoubleMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), } }, #[cfg(feature = "conditional-storage")] @@ -1539,7 +1670,7 @@ fn test_storage_info() { storage_name: b"ConditionalNMap".to_vec(), prefix: prefix(b"Example", b"ConditionalNMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), } }, StorageInfo { @@ -1547,7 +1678,7 @@ fn test_storage_info() { storage_name: b"RenamedCountedMap".to_vec(), prefix: prefix(b"Example", b"RenamedCountedMap").to_vec(), max_values: None, - max_size: Some(1 + 4 + 8), + max_size: Some(8 + 1 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), @@ -1597,8 +1728,9 @@ fn test_storage_info() { #[test] fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} - fn _b(t: (System, (Example4, (Example2, (Example,))))) { + fn _b(t: (System, Example4, Example2, Example)) { _a(t) } } @@ -1607,7 +1739,7 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithSystem) {} - fn _b(t: (System, (Example, (Example2, (Example4,))))) { + fn _b(t: (System, Example, Example2, Example4)) { _a(t) } } @@ -1616,7 +1748,7 @@ fn assert_type_all_pallets_with_system_is_correct() { fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithoutSystem) {} - fn _b(t: (Example, (Example2, (Example4,)))) { + fn _b(t: (Example, Example2, Example4)) { _a(t) } } @@ -1624,8 +1756,9 @@ fn assert_type_all_pallets_without_system_is_correct() { #[test] fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example, (System,))))) { + fn _b(t: (Example4, Example2, Example, System)) { _a(t) } } @@ -1633,8 +1766,9 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { #[test] fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example,)))) { + fn _b(t: (Example4, Example2, Example)) { _a(t) } } diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 9327f5b6a3304..4d597e24356c7 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -31,7 +31,10 @@ impl SomeAssociation for u64 { mod pallet_old { use super::SomeAssociation; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + decl_error, decl_event, decl_module, decl_storage, + traits::Get, + weights::{RefTimeWeight, Weight}, + Parameter, }; use frame_system::ensure_root; @@ -40,7 +43,7 @@ mod pallet_old { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + SomeAssociation; type Event: From> + Into<::Event>; @@ -75,7 +78,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -85,7 +88,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -113,7 +116,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + SomeAssociation @@ -131,7 +134,7 @@ pub mod pallet { impl Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -141,7 +144,7 @@ pub mod pallet { #[pallet::call] impl Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 3de45df223674..2fd6833eaa428 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -23,13 +23,16 @@ use frame_support::traits::{ConstU32, ConstU64}; mod pallet_old { use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + decl_error, decl_event, decl_module, decl_storage, + traits::Get, + weights::{RefTimeWeight, Weight}, + Parameter, }; use frame_system::ensure_root; pub trait Config: frame_system::Config { type SomeConst: Get; - type Balance: Parameter + codec::HasCompact + From + Into + Default; + type 
Balance: Parameter + codec::HasCompact + From + Into + Default; type Event: From> + Into<::Event>; } @@ -62,7 +65,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -72,7 +75,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -99,7 +102,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + scale_info::StaticTypeInfo; @@ -116,7 +119,7 @@ pub mod pallet { impl, I: 'static> Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -126,7 +129,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 360a73e5ea2a3..2ae910e73d87e 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -31,7 +31,7 @@ use sp_runtime::{DispatchError, ModuleError}; #[frame_support::pallet] pub mod pallet { use codec::MaxEncodedLen; - use frame_support::{pallet_prelude::*, scale_info}; + use frame_support::{pallet_prelude::*, parameter_types, scale_info}; use frame_system::pallet_prelude::*; use sp_std::any::TypeId; @@ -54,10 +54,10 @@ pub mod pallet { fn on_initialize(_: BlockNumberFor) -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(10)); - 10 + Weight::from_ref_time(10) } else { Self::deposit_event(Event::Something(11)); - 11 + Weight::from_ref_time(11) } } fn on_finalize(_: BlockNumberFor) { @@ -70,10 +70,10 @@ pub mod pallet { fn on_runtime_upgrade() -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(30)); - 30 + Weight::from_ref_time(30) } else { Self::deposit_event(Event::Something(31)); - 31 + Weight::from_ref_time(31) } } fn integrity_test() {} @@ -82,7 +82,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { /// Doc comment put in metadata - #[pallet::weight(Weight::from(*_foo))] + #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -104,9 +104,11 @@ pub mod pallet { } #[pallet::error] + #[derive(PartialEq, Eq)] pub enum Error { /// doc comment put into metadata InsufficientProposersBalance, + NonExistentStorageValue, } #[pallet::event] @@ -128,6 +130,20 @@ pub mod pallet { #[pallet::storage] pub type Map2 = StorageMap<_, Twox64Concat, u16, u32>; + parameter_types! 
{ + pub const Map3Default: Result> = Ok(1337); + } + + #[pallet::storage] + pub type Map3 = StorageMap< + _, + Blake2_128Concat, + u32, + u64, + ResultQuery::NonExistentStorageValue>, + Map3Default, + >; + #[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -136,6 +152,17 @@ pub mod pallet { pub type DoubleMap2 = StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; + #[pallet::storage] + pub type DoubleMap3 = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -145,6 +172,15 @@ pub mod pallet { pub type NMap2 = StorageNMap<_, (storage::Key, storage::Key), u64>; + #[pallet::storage] + #[pallet::getter(fn nmap3)] + pub type NMap3 = StorageNMap< + _, + (NMapKey, NMapKey), + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -309,12 +345,18 @@ frame_support::construct_runtime!( } ); +use frame_support::weights::Weight; + #[test] fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!(pallet::Call::::get_call_names(), &["foo", "foo_storage_layer"]); @@ -322,7 +364,11 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -436,6 +482,13 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(>::get(2), Ok(1337)); + >::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -450,6 +503,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + >::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -462,6 +526,17 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), 
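// --- Illustrative sketch (not part of the patch): how the `Map3` final key
// asserted in this test is derived. The `b"Example"` / `b"Map3"` prefixes come
// from the test above; the helper function itself is hypothetical glue, and the
// root re-exports of `Blake2_128Concat` / `StorageHasher` are assumed.
use codec::Encode;
use frame_support::{Blake2_128Concat, StorageHasher};
use sp_core::hashing::twox_128;

fn map3_final_key(key: u32) -> Vec<u8> {
    // final key = twox128(pallet name) ++ twox128(storage name)
    //             ++ blake2_128_concat(SCALE(key))
    let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat();
    k.extend(Blake2_128Concat::hash(&key.encode()));
    k
}

fn main() {
    // The first 32 bytes are the pallet/storage prefix the test compares
    // against `final_prefix()`.
    let prefix = [twox_128(b"Example"), twox_128(b"Map3")].concat();
    assert_eq!(&map3_final_key(1)[..32], &prefix[..]);
}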
Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); }); TestExternalities::default().execute_with(|| { @@ -481,6 +556,13 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(>::get(2), Ok(1337)); + >::insert(&1, &2, &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -495,6 +577,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + >::insert((&1,), &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -507,6 +600,17 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); }); } @@ -557,10 +661,10 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 21); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(21)); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 61); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(61)); assert_eq!( frame_system::Pallet::::events()[0].event, @@ -688,6 +792,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "Map3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![0, 57, 5, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -710,6 +825,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "DoubleMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: scale_info::meta_type::(), + key: scale_info::meta_type::<(u32, u64)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -732,6 +858,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: 
"NMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::<(u8, u16)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + value: scale_info::meta_type::(), + }, + default: vec![1, 1], + docs: vec![], + }, ], }), calls: Some(scale_info::meta_type::>().into()), diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 3a636d9f659c7..1d581ea7ed572 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -19,8 +19,3 @@ error[E0369]: binary operation `==` cannot be applied to type `&, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ - | -help: consider further restricting this bound - | -17 | #[pallet::call + std::cmp::PartialEq] - | +++++++++++++++++++++ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index f182382d18f11..b1487776eac50 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -19,11 +19,6 @@ error[E0369]: binary operation `==` cannot be applied to type `&, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ - | -help: consider further restricting this bound - | -17 | #[pallet::call + std::cmp::PartialEq] - | +++++++++++++++++++++ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index c196b28a31ce6..a0418760ba7e2 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -8,3 +8,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` = note: required for the cast to the object type `dyn std::fmt::Debug` +help: consider annotating `Bar` with `#[derive(Debug)]` + | +17 | #[derive(Debug)] + | diff --git a/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index 0f2ea7e161c4e..7edb55a62d401 100644 --- a/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -4,4 +4,14 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | + = help: the following other types implement trait `PalletError`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + 
(TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 36 others = note: this error originates in the derive macro `frame_support::PalletError` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 3db258a819fcb..92623e0329fe3 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -9,11 +9,6 @@ error[E0369]: binary operation `==` cannot be applied to type `& { - | +++++++++++++++++++++ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/event_field_not_member.rs:23:7 diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index e01f66fa3d19d..80903928585b4 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -9,3 +9,7 @@ note: required by a bound in `GenesisBuild` | | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { | ^^^^^^^ required by this bound in `GenesisBuild` +help: consider annotating `pallet::GenesisConfig` with `#[derive(Default)]` + | +19 | #[derive(Default)] + | diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index d1a89fbb850e9..ff52a094d6f8d 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,13 +1,13 @@ error[E0107]: missing generics for trait `Hooks` - --> $DIR/hooks_invalid_item.rs:12:18 + --> tests/pallet_ui/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} | ^^^^^ expected 1 generic argument | note: trait defined here, with 1 generic parameter: `BlockNumber` - --> $DIR/hooks.rs:214:11 + --> $WORKSPACE/frame/support/src/traits/hooks.rs | -214 | pub trait Hooks { + | pub trait Hooks { | ^^^^^ ----------- help: add missing generic argument | diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 87528751a0a7a..e674e49eddbe5 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -4,6 +4,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -14,6 +19,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not 
implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -24,6 +39,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -35,6 +60,16 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | + = help: the following other types implement trait `TypeInfo`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and 159 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -44,6 +79,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -54,6 +94,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -64,6 +114,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other 
types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 6c3f6dc662fbc..ecdc18263432e 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -4,6 +4,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -14,6 +19,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -24,6 +39,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -35,6 +60,16 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | + = help: the following other types implement trait `TypeInfo`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and 159 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -44,6 
+79,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -54,6 +94,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -64,6 +114,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 68856f122c7ac..d9cd20711403d 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -4,4 +4,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 9 | #[pallet::pallet] | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | + = help: the following other types implement trait `MaxEncodedLen`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 76 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 
226cb40f1d48b..9a4e8d740cb2c 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -4,5 +4,15 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 12 | #[pallet::pallet] | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | + = help: the following other types implement trait `MaxEncodedLen`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 76 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs new file mode 100644 index 0000000000000..a051cc087db58 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr new file mode 100644 index 0000000000000..98265462bbdfb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr @@ -0,0 +1,15 @@ +error[E0107]: missing generics for enum `pallet::Error` + --> tests/pallet_ui/storage_result_query_missing_generics.rs:17:56 + | +17 | type Foo = StorageValue<_, u8, ResultQuery>; + | ^^^^^ expected 1 generic argument + | +note: enum defined here, with 1 generic parameter: `T` + --> tests/pallet_ui/storage_result_query_missing_generics.rs:12:11 + | +12 | pub enum Error { + | ^^^^^ - +help: add missing generic argument + | +17 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue>>; + | ~~~~~~~~ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs new file mode 100644 index 0000000000000..9e0da4b62128d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + SomeOtherError, + } + + #[pallet::storage] + 
type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr new file mode 100644 index 0000000000000..4be2a36eb89e1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected number of generic arguments for ResultQuery, expected 1 type argument, found 2 + --> tests/pallet_ui/storage_result_query_multiple_type_args.rs:19:56 + | +19 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs new file mode 100644 index 0000000000000..102a2261f8333 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs @@ -0,0 +1,16 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr new file mode 100644 index 0000000000000..77a7972a5b5cf --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected number of path segments for the generics in ResultQuery, expected a path with at least 2 segments, found 1 + --> tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs:12:56 + | +12 | type Foo = StorageValue<_, u8, ResultQuery>; + | ^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs new file mode 100644 index 0000000000000..f30dc3b6a3cc7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr new file mode 100644 index 0000000000000..caffd846f272a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected generic args for ResultQuery, expected angle-bracketed arguments, found `(NonExistentValue)` + --> tests/pallet_ui/storage_result_query_parenthesized_generics.rs:18:55 + | +18 | type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; + | ^^^^^^^^^^^^^^^^^^ 
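// --- Illustrative sketch (not part of the patch): a well-formed counterpart to
// the failing `ResultQuery` UI tests in this patch. Taken together, the tests
// pin down that `ResultQuery` wants exactly one angle-bracketed type path of at
// least two segments, naming a variant of the pallet's `Error` enum with its
// generics supplied; this is the shape the stderr help above suggests.
#[frame_support::pallet]
mod pallet {
    use frame_support::pallet_prelude::*;

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::pallet]
    pub struct Pallet<T>(core::marker::PhantomData<T>);

    #[pallet::error]
    pub enum Error<T> {
        NonExistentValue,
    }

    // With `ResultQuery`, `get()` returns `Result<u8, Error<T>>` rather than
    // `Option<u8>`.
    #[pallet::storage]
    type Foo<T> = StorageValue<_, u8, ResultQuery<Error<T>::NonExistentValue>>;
}

fn main() {}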
diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs new file mode 100644 index 0000000000000..a5065398b3970 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery<'static>>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr new file mode 100644 index 0000000000000..9f333ae28e6aa --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected generic argument kind, expected a type path to a `PalletError` enum variant, found `'static` + --> tests/pallet_ui/storage_result_query_wrong_generic_kind.rs:18:56 + | +18 | type Foo = StorageValue<_, u8, ResultQuery<'static>>; + | ^^^^^^^ diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 7ed8454668327..6415c3c0d2c07 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -54,7 +54,7 @@ frame_support::decl_module! { } fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - 0 + frame_support::weights::Weight::zero() } } } diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 05ed60fe90196..7404ccace2c09 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -276,5 +276,7 @@ fn storage_layer_in_decl_pallet_call() { let call2 = Call::DeclPallet(decl_pallet::Call::set_value { value: 1 }); assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); + // Calling the function directly also works with storage layers. + assert_noop!(decl_pallet::Module::::set_value(Origin::signed(1), 1), "Revert!"); }); } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 0bc34fcbc5be2..017298dab3928 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -16,7 +16,10 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + traits::{ConstU32, ConstU64}, + weights::Weight, +}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -61,7 +64,7 @@ frame_support::construct_runtime!( frame_support::parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - 4 * 1024 * 1024, Perbill::from_percent(75), + Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75), ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength::max_with_normal_ratio( diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 5090093fe168f..59a6e14aca175 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -101,7 +101,7 @@ impl SignedExtension for CheckMortality { mod tests { use super::*; use crate::mock::{new_test_ext, System, Test, CALL}; - use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; + use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use sp_core::H256; #[test] @@ -126,8 +126,11 @@ mod tests { #[test] fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let len = 0_usize; let ext = ( crate::CheckWeight::::new(), diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index b59c36ecb53b5..b7232c430696d 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -19,7 +19,7 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Decode, Encode}; use frame_support::{ traits::Get, - weights::{DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::{DispatchClass, DispatchInfo, PostDispatchInfo, Weight}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -238,7 +238,7 @@ where } let unspent = post_info.calc_unspent(info); - if unspent > 0 { + if unspent > Weight::zero() { crate::BlockWeight::::mutate(|current_weight| { current_weight.sub(unspent, info.class); }) @@ -297,7 +297,7 @@ mod tests { fn check(call: impl FnOnce(&DispatchInfo, usize)) { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: Weight::max_value(), + weight: Weight::MAX, class: DispatchClass::Mandatory, ..Default::default() }; @@ -309,7 +309,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); - assert_eq!(System::block_weight().total(), Weight::max_value()); + assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total() > block_weight_limit()); }); check(|max, len| { @@ -321,7 +321,8 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1, + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + Weight::one(), class: DispatchClass::Normal, ..Default::default() }; @@ -347,7 +348,7 @@ mod tests { let okay = DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; let max = DispatchInfo { - weight: weight + 1, + weight: weight + Weight::one(), class: DispatchClass::Operational, ..Default::default() }; @@ -364,8 +365,8 @@ mod tests { #[test] fn register_extra_weight_unchecked_doesnt_care_about_limits() { new_test_ext().execute_with(|| { - System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); - 
assert_eq!(System::block_weight().total(), Weight::max_value()); + System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); + assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total() > block_weight_limit()); }); } @@ -378,9 +379,10 @@ mod tests { // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) // And Operational can be 256 to produce a full block (-5 for base) - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -388,9 +390,9 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), 768); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), 1024); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); assert_eq!(System::block_weight().total(), block_weight_limit()); // Checking single extrinsic should not take current block weight into account. assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); @@ -401,9 +403,10 @@ mod tests { fn dispatch_order_does_not_effect_weight_logic() { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -412,9 +415,9 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); // Extra 15 here from block execution + base extrinsic weight - assert_eq!(System::block_weight().total(), 266); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), 1024); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); assert_eq!(System::block_weight().total(), block_weight_limit()); }); } @@ -423,11 +426,14 @@ mod tests { fn operational_works_on_full_block() { new_test_ext().execute_with(|| { // An on_initialize takes up the whole block! (Every time!) 
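// --- Illustrative sketch (not part of the patch): `Weight::max_value()` becomes
// the associated constant `Weight::MAX` in these hunks, and block-weight
// accounting uses saturating arithmetic, which is why the test below can pile
// more weight on top of an already-full block without overflowing. Assuming
// this era's `frame_support::weights::Weight`:
use frame_support::weights::Weight;

fn main() {
    let full = Weight::MAX;
    // Adding to a full block saturates instead of wrapping, so `total()` can
    // legitimately exceed `block_weight_limit()` in the assertions nearby.
    assert_eq!(full.saturating_add(Weight::from_ref_time(251)), Weight::MAX);
}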
- System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); - let dispatch_normal = - DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; + System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Mandatory); + let dispatch_normal = DispatchInfo { + weight: Weight::from_ref_time(251), + class: DispatchClass::Normal, + ..Default::default() + }; let dispatch_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -453,9 +459,9 @@ mod tests { #[test] fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, ..Default::default() }; + let normal = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -489,7 +495,7 @@ mod tests { fn signed_ext_check_weight_block_size_works() { new_test_ext().execute_with(|| { let normal = DispatchInfo::default(); - let normal_limit = normal_weight_limit() as usize; + let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); @@ -505,8 +511,11 @@ mod tests { reset_check_weight(&normal, normal_limit + 1, true); // Operational ones don't have this limit. - let op = - DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let op = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; reset_check_weight(&op, normal_limit, false); reset_check_weight(&op, normal_limit + 100, false); reset_check_weight(&op, 1024, false); @@ -518,12 +527,14 @@ mod tests { fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: 100, ..Default::default() }; + let small = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; - let big = - DispatchInfo { weight: normal_limit - base_extrinsic + 1, ..Default::default() }; + let big = DispatchInfo { + weight: normal_limit - base_extrinsic + Weight::one(), + ..Default::default() + }; let len = 0_usize; let reset_check_weight = |i, f, s| { @@ -538,9 +549,9 @@ mod tests { } }; - reset_check_weight(&small, false, 0); - reset_check_weight(&medium, false, 0); - reset_check_weight(&big, true, 1); + reset_check_weight(&small, false, Weight::zero()); + reset_check_weight(&medium, false, Weight::zero()); + reset_check_weight(&big, true, Weight::one()); }) } @@ -548,20 +559,26 @@ mod tests { fn signed_ext_check_weight_refund_works() { new_test_ext().execute_with(|| { // This is half of the max block weight - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = - PostDispatchInfo { actual_weight: Some(128), pays_fee: Default::default() }; + let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(128)), + pays_fee: Default::default(), + }; let len = 0_usize; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; 
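// --- Illustrative sketch (not part of the patch): the refund assertions that
// follow rest on `PostDispatchInfo::calc_unspent`, the gap between the declared
// pre-dispatch weight and the (capped) actual weight. A hedged sketch, assuming
// this era's `frame_support` dispatch types:
use frame_support::weights::{DispatchInfo, PostDispatchInfo, Weight};

fn main() {
    let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() };
    let post = PostDispatchInfo {
        actual_weight: Some(Weight::from_ref_time(128)),
        pays_fee: Default::default(),
    };
    // 512 declared - 128 actually used = 384 handed back to the block budget.
    assert_eq!(post.calc_unspent(&info), Weight::from_ref_time(384));
}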
// We allow 75% for normal transaction, so we put 25% - extrinsic base weight BlockWeight::::mutate(|current_weight| { - current_weight.set(0, DispatchClass::Mandatory); - current_weight.set(256 - base_extrinsic, DispatchClass::Normal); + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight + .set(Weight::from_ref_time(256) - base_extrinsic, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(BlockWeight::::get().total(), info.weight + 256); + assert_eq!( + BlockWeight::::get().total(), + info.weight + Weight::from_ref_time(256) + ); assert_ok!(CheckWeight::::post_dispatch( Some(pre), @@ -570,27 +587,34 @@ mod tests { len, &Ok(()) )); - assert_eq!(BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256); + assert_eq!( + BlockWeight::::get().total(), + post_info.actual_weight.unwrap() + Weight::from_ref_time(256) + ); }) } #[test] fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = - PostDispatchInfo { actual_weight: Some(700), pays_fee: Default::default() }; + let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(700)), + pays_fee: Default::default(), + }; let len = 0_usize; BlockWeight::::mutate(|current_weight| { - current_weight.set(0, DispatchClass::Mandatory); - current_weight.set(128, DispatchClass::Normal); + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(Weight::from_ref_time(128), DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::::get().total(), - info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert_ok!(CheckWeight::::post_dispatch( @@ -602,7 +626,9 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } @@ -611,7 +637,7 @@ mod tests { fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { let weights = block_weights(); - let free = DispatchInfo { weight: 0, ..Default::default() }; + let free = DispatchInfo { weight: Weight::zero(), ..Default::default() }; let len = 0_usize; // Initial weight from `weights.base_block` @@ -630,9 +656,10 @@ mod tests { // Max block is 1024 // Max normal is 768 (75%) // Max mandatory is unlimited - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let mandatory = DispatchInfo { - weight: 1019, + weight: Weight::from_ref_time(1019), class: DispatchClass::Mandatory, ..Default::default() }; @@ -640,10 +667,10 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), 768); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), 1024); - assert_eq!(System::block_weight().total(), 1024 + 768); + 
assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); } @@ -652,30 +679,36 @@ mod tests { fn no_max_total_should_still_be_limited_by_max_block() { // given let maximum_weight = BlockWeights::builder() - .base_block(0) + .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { - w.base_extrinsic = 0; - w.max_total = Some(20); + w.base_extrinsic = Weight::zero(); + w.max_total = Some(Weight::from_ref_time(20)); }) .for_class(DispatchClass::Mandatory, |w| { - w.base_extrinsic = 0; - w.reserved = Some(5); + w.base_extrinsic = Weight::zero(); + w.reserved = Some(Weight::from_ref_time(5)); w.max_total = None; }) .build_or_panic(); let all_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => 10, - DispatchClass::Operational => 10, - DispatchClass::Mandatory => 0, + DispatchClass::Normal => Weight::from_ref_time(10), + DispatchClass::Operational => Weight::from_ref_time(10), + DispatchClass::Mandatory => Weight::zero(), }); assert_eq!(maximum_weight.max_block, all_weight.total()); // fits into reserved - let mandatory1 = - DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory1 = DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Mandatory, + ..Default::default() + }; // does not fit into reserved and the block is full. - let mandatory2 = - DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory2 = DispatchInfo { + weight: Weight::from_ref_time(6), + class: DispatchClass::Mandatory, + ..Default::default() + }; // when assert_ok!(calculate_consumed_weight::<::Call>( diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index dff2a8e8ce504..61d17f391788a 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -91,8 +91,8 @@ use frame_support::{ OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, }, weights::{ - extract_actual_weight, DispatchClass, DispatchInfo, PerDispatchClass, RuntimeDbWeight, - Weight, + extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, + PerDispatchClass, RuntimeDbWeight, Weight, }, Parameter, }; @@ -197,6 +197,7 @@ impl, MaxOverflow: Get> ConsumerLimits for (MaxNormal, pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; use frame_support::pallet_prelude::*; + use sp_runtime::DispatchErrorWithPostInfo; /// System configuration trait. Implemented by runtime. #[pallet::config] @@ -232,7 +233,8 @@ pub mod pallet { + Default + MaybeDisplay + AtLeast32Bit - + Copy; + + Copy + + MaxEncodedLen; /// The block number type used by the runtime. type BlockNumber: Parameter @@ -319,7 +321,7 @@ pub mod pallet { /// Data to be associated with an account (other than nonce/transaction counter, which this /// pallet does regardless). - type AccountData: Member + FullCodec + Clone + Default + TypeInfo; + type AccountData: Member + FullCodec + Clone + Default + TypeInfo + MaxEncodedLen; /// Handler for when a new account has just been created. type OnNewAccount: OnNewAccount; @@ -331,7 +333,7 @@ pub mod pallet { type SystemWeightInfo: WeightInfo; - /// The designated SS85 prefix of this chain. + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. 
Reason is /// that the runtime should know about the prefix in order to make use of it as @@ -354,7 +356,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::hooks] @@ -371,8 +372,16 @@ pub mod pallet { // that's not possible at present (since it's within the pallet macro). #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - Ok(().into()) + match ensure_root(origin) { + Ok(_) => Ok(().into()), + Err(_) => { + // roughly same as a 4 byte remark since perbill is u32. + Err(DispatchErrorWithPostInfo { + post_info: Some(T::SystemWeightInfo::remark(4u32)).into(), + error: DispatchError::BadOrigin, + }) + }, + } } /// Make some on-chain remark. @@ -569,6 +578,7 @@ pub mod pallet { /// Extrinsics data for the current block (maps an extrinsic's index to its data). #[pallet::storage] #[pallet::getter(fn extrinsic_data)] + #[pallet::unbounded] pub(super) type ExtrinsicData = StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; @@ -584,6 +594,7 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; @@ -595,6 +606,7 @@ pub mod pallet { /// Events have a large in-memory size. Box the events to not go out-of-memory /// just in case someone still reads them from within the runtime. #[pallet::storage] + #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; @@ -614,12 +626,14 @@ pub mod pallet { /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn event_topics)] pub(super) type EventTopics = StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. #[pallet::storage] + #[pallet::unbounded] pub type LastRuntimeUpgrade = StorageValue<_, LastRuntimeUpgradeInfo>; /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. @@ -681,7 +695,7 @@ pub type Key = Vec; pub type KeyValue = (Vec, Vec); /// A phase of a block's execution. -#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub enum Phase { /// Applying an extrinsic. @@ -729,7 +743,7 @@ type EventIndex = u32; pub type RefCount = u32; /// Information of an account. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct AccountInfo { /// The number of transactions this account has sent. 
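// --- Illustrative sketch (not part of the patch): with the blanket
// `#[pallet::without_storage_info]` removed above, every storage type must
// implement `MaxEncodedLen` unless its item is marked `#[pallet::unbounded]`,
// hence the new bounds on `Index` / `AccountData` and the extra derives on
// `Phase` and `AccountInfo`. The derive gives a compile-time upper bound on the
// SCALE-encoded size; `MyAccountData` here is a hypothetical stand-in.
use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;

#[derive(Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)]
pub struct MyAccountData {
    pub free: u128,
    pub reserved: u128,
}

fn main() {
    // Two u128 fields, 16 bytes each under SCALE.
    assert_eq!(MyAccountData::max_encoded_len(), 32);
}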
pub nonce: Index, @@ -1313,18 +1327,18 @@ impl Pallet { ).deconstruct(), Self::block_weight().get(DispatchClass::Normal), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Normal), - T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Normal).ref_time(), + T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), Self::block_weight().get(DispatchClass::Operational), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Operational), - T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Operational).ref_time(), + T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), Self::block_weight().get(DispatchClass::Mandatory), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Mandatory), - T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Mandatory).ref_time(), + T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), ); ExecutionPhase::::kill(); @@ -1491,6 +1505,7 @@ impl Pallet { /// To be called immediately after an extrinsic has been applied. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { info.weight = extract_actual_weight(r, &info); + info.pays_fee = extract_actual_pays_fee(r, &info); Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index d3c108afb6f32..d9be460b3a4fb 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -27,7 +27,7 @@ use frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; use scale_info::TypeInfo; -use sp_runtime::{Perbill, RuntimeDebug}; +use sp_runtime::{traits::Bounded, Perbill, RuntimeDebug}; /// Block length limit configuration. #[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] @@ -230,14 +230,15 @@ impl BlockWeights { // base_for_class error_assert!( (max_for_class > self.base_block && max_for_class > base_for_class) - || max_for_class == 0, + || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), + weights.max_extrinsic.unwrap_or(Weight::zero()) <= + max_for_class.saturating_sub(base_for_class), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -246,14 +247,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value) > 0, + weights.max_extrinsic.unwrap_or_else(Weight::max_value) > Weight::zero(), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. 
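// --- Illustrative sketch (not part of the patch): `note_applied_extrinsic`
// above now also folds a post-dispatch `Pays` override into the emitted event
// via the newly imported `extract_actual_pays_fee`, mirroring what
// `extract_actual_weight` already did for weight. A hedged sketch of the rule
// it implements, assuming the signatures this patch introduces:
use frame_support::{
    dispatch::DispatchResultWithPostInfo,
    weights::{extract_actual_pays_fee, DispatchInfo, Pays},
};

fn main() {
    let pre_info = DispatchInfo { pays_fee: Pays::Yes, ..Default::default() };
    // A successful dispatch that reports `Pays::No` post-dispatch waives the
    // fee, regardless of what was declared up front.
    let result: DispatchResultWithPostInfo = Ok(Pays::No.into());
    assert_eq!(extract_actual_pays_fee(&result, &pre_info), Pays::No);
}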
error_assert!( - reserved > base_for_class || reserved == 0, + reserved > base_for_class || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -262,7 +263,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block >= weights.max_total.unwrap_or(0), + self.max_block >= weights.max_total.unwrap_or(Weight::zero()), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -294,9 +295,9 @@ impl BlockWeights { /// is not suitable for production deployments. pub fn simple_max(block_weight: Weight) -> Self { Self::builder() - .base_block(0) + .base_block(Weight::new()) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = 0; + weights.base_extrinsic = Weight::new(); }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = block_weight.into(); @@ -333,9 +334,10 @@ impl BlockWeights { BlockWeightsBuilder { weights: BlockWeights { base_block: constants::BlockExecutionWeight::get(), - max_block: 0, + max_block: Weight::zero(), per_class: PerDispatchClass::new(|class| { - let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; + let initial = + if class == DispatchClass::Mandatory { None } else { Some(Weight::zero()) }; WeightsPerClass { base_extrinsic: constants::ExtrinsicBaseWeight::get(), max_extrinsic: None, @@ -369,7 +371,7 @@ impl BlockWeightsBuilder { /// /// This is to make sure that extrinsics don't stay forever in the pool, /// because they could seamingly fit the block (since they are below `max_block`), - /// but the cost of calling `on_initialize` alway prevents them from being included. + /// but the cost of calling `on_initialize` always prevents them from being included. pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { self.init_cost = Some(init_cost); self diff --git a/frame/system/src/migrations/mod.rs b/frame/system/src/migrations/mod.rs index f02af7a316fe1..15746d7376ac5 100644 --- a/frame/system/src/migrations/mod.rs +++ b/frame/system/src/migrations/mod.rs @@ -81,7 +81,7 @@ pub fn migrate_from_single_u8_to_triple_ref_count() -> Wei ); >::put(true); >::put(true); - Weight::max_value() + Weight::MAX } /// Migrate from unique `u32` reference counting to triple `u32` reference counting. @@ -99,7 +99,7 @@ pub fn migrate_from_single_to_triple_ref_count() -> Weight translated ); >::put(true); - Weight::max_value() + Weight::MAX } /// Migrate from dual `u32` reference counting to triple `u32` reference counting. @@ -117,5 +117,5 @@ pub fn migrate_from_dual_to_triple_ref_count() -> Weight { translated ); >::put(true); - Weight::max_value() + Weight::MAX } diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index f3f542aa83a9a..23ab3c2af20b0 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -42,7 +42,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = 1024; +const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024); parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { @@ -60,9 +60,9 @@ parameter_types! 
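// All of the `0` -> `Weight::zero()` and `1024` -> `Weight::from_ref_time(1024)`
// churn above follows from `Weight` becoming an opaque struct rather than a
// `u64` alias, so ratio math now goes through the scalar `ref_time` component.
// A small sketch using `sp_runtime::Percent`; the `fullness` helper name is
// ours, but it mirrors the `Percent::from_rational(used.ref_time(),
// max.ref_time())` pattern in the block-fullness logging above:
use sp_runtime::{PerThing, Percent};

fn fullness(used_ref_time: u64, max_ref_time: u64) -> Percent {
    Percent::from_rational(used_ref_time, max_ref_time)
}

fn main() {
    // e.g. 256 of 1024 ref-time units consumed -> 25% full.
    assert_eq!(fullness(256, 1024), Percent::from_percent(25));
}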
{ write: 100, }; pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(10) + .base_block(Weight::from_ref_time(10)) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = 5; + weights.base_extrinsic = Weight::from_ref_time(5); }) .for_class(DispatchClass::Normal, |weights| { weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 0facd796b2a0c..f82ea338bd146 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -17,7 +17,9 @@ use crate::*; use frame_support::{ - assert_noop, assert_ok, dispatch::PostDispatchInfo, weights::WithPostDispatchInfo, + assert_noop, assert_ok, + dispatch::PostDispatchInfo, + weights::{Pays, WithPostDispatchInfo}, }; use mock::{Origin, *}; use sp_core::H256; @@ -216,13 +218,13 @@ fn deposit_event_should_work() { } #[test] -fn deposit_event_uses_actual_weight() { +fn deposit_event_uses_actual_weight_and_pays_fee() { new_test_ext().execute_with(|| { System::reset_events(); System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); - let pre_info = DispatchInfo { weight: 1000, ..Default::default() }; + let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); System::note_applied_extrinsic( @@ -230,7 +232,42 @@ fn deposit_event_uses_actual_weight() { &Ok(Some(1200).into()), pre_info, ); - System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); + System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::Yes).into()), pre_info); + System::note_applied_extrinsic(&Ok(Pays::No.into()), pre_info); + System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::No).into()), pre_info); + System::note_applied_extrinsic(&Ok((Some(500), Pays::No).into()), pre_info); + System::note_applied_extrinsic( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(999))), + pre_info, + ); + + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(800)), + pays_fee: Pays::Yes, + }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(800)), + pays_fee: Pays::No, + }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); assert_eq!( System::events(), @@ -238,7 +275,10 @@ fn deposit_event_uses_actual_weight() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 300, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300), + ..Default::default() + }, } .into(), topics: vec![] @@ -246,7 +286,10 @@ fn deposit_event_uses_actual_weight() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + ..Default::default() + }, } .into(), topics: vec![] @@ -254,16 +297,109 @@ fn 
deposit_event_uses_actual_weight() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + ..Default::default() + }, } .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(500), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(999), + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { weight: 999, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800), + pays_fee: Pays::No, + ..Default::default() + }, } .into(), topics: vec![] diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 19719032587ef..4f7f168eb55ab 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_system. @@ -59,44 +59,44 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - (1_000_000 as Weight) + Weight::from_ref_time(1_000_000 as RefTimeWeight) } /// The range of component `b` is `[0, 3932160]`. 
fn remark_with_event(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - (5_367_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(5_367_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } @@ -104,43 +104,43 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - (1_000_000 as Weight) + Weight::from_ref_time(1_000_000 as RefTimeWeight) } /// The range of component `b` is `[0, 3932160]`. 
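// The regenerated weight functions above all share one shape: a constant base
// term, a per-component slope, and database read/write charges. A plain-u64
// sketch of that formula; the constants here are invented for illustration,
// and real runtimes take the per-write cost from `T::DbWeight`:
const BASE: u64 = 5_367_000; // base execution cost in ref-time units
const SLOPE: u64 = 603_000; // marginal cost per item
const DB_WRITE: u64 = 100_000; // assumed cost of one storage write

fn set_storage_weight(items: u64) -> u64 {
    BASE.saturating_add(SLOPE.saturating_mul(items))
        .saturating_add(DB_WRITE.saturating_mul(items))
}

fn main() {
    // 10 items: base + 10 * (slope + one storage write each).
    assert_eq!(set_storage_weight(10), 5_367_000 + 10 * (603_000 + 100_000));
}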
fn remark_with_event(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - (5_367_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(5_367_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 81ed67913c2e6..6a7f849d1329a 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -282,6 +282,8 @@ impl Pallet { #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] pub fn set_timestamp(now: T::Moment) { Now::::put(now); + DidUpdate::::put(true); + >::on_timestamp_set(now); } } diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 9536414c54db6..b4c377cfa30ef 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -49,7 +49,7 @@ frame_support::construct_runtime!( parameter_types! 
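// `set_timestamp` above now flips `DidUpdate` and fires `OnTimestampSet`, which
// is why the reworked tests below seed `Now` directly before calling `set`: a
// real `set` call asserts that the timestamp has not already been updated in
// the block. A standalone sketch of that invariant, with illustrative field
// names and the minimum-period check reduced to a simple monotonicity check:
struct TimestampPallet {
    now: u64,
    did_update: bool,
}

impl TimestampPallet {
    // The test helper after the change: behaves like a real `set` call.
    fn set_timestamp(&mut self, now: u64) {
        self.now = now;
        self.did_update = true;
    }

    // The `set` extrinsic: only legal once per block.
    fn set(&mut self, now: u64) {
        assert!(!self.did_update, "Timestamp must be updated only once in the block");
        assert!(now >= self.now, "Timestamp must not go backwards");
        self.set_timestamp(now);
    }
}

fn main() {
    let mut t = TimestampPallet { now: 0, did_update: false };
    t.now = 44; // seed `Now` directly, as the reworked tests do
    t.set(46); // fine: first update in this block
}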
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -101,7 +101,7 @@ pub(crate) fn clear_captured_moment() { } pub(crate) fn get_captured_moment() -> Option { - CAPTURED_MOMENT.with(|x| x.borrow().clone()) + CAPTURED_MOMENT.with(|x| *x.borrow()) } pub(crate) fn new_test_ext() -> TestExternalities { diff --git a/frame/timestamp/src/tests.rs b/frame/timestamp/src/tests.rs index f52ba7849c951..ef9fd6e39d4b5 100644 --- a/frame/timestamp/src/tests.rs +++ b/frame/timestamp/src/tests.rs @@ -23,7 +23,7 @@ use frame_support::assert_ok; #[test] fn timestamp_works() { new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); + crate::Now::::put(46); assert_ok!(Timestamp::set(Origin::none(), 69)); assert_eq!(Timestamp::now(), 69); assert_eq!(Some(69), get_captured_moment()); @@ -36,7 +36,6 @@ fn double_timestamp_should_fail() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); assert_ok!(Timestamp::set(Origin::none(), 69)); - let _ = Timestamp::set(Origin::none(), 70); }); } @@ -46,7 +45,7 @@ fn double_timestamp_should_fail() { )] fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); + crate::Now::::put(44); let _ = Timestamp::set(Origin::none(), 46); }); } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 6b4ebfa74dd87..f71a0f753a43e 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_timestamp. @@ -54,12 +54,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - (8_080_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_080_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn on_finalize() -> Weight { - (2_681_000 as Weight) + Weight::from_ref_time(2_681_000 as RefTimeWeight) } } @@ -68,11 +68,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - (8_080_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_080_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn on_finalize() -> Weight { - (2_681_000 as Weight) + Weight::from_ref_time(2_681_000 as RefTimeWeight) } } diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 33e455bd3b9fd..4956e2a095688 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -92,18 +92,20 @@ benchmarks_instance_pallet! { report_awesome { let r in 0 .. 
T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); + let awesome_person_lookup = T::Lookup::unlookup(awesome_person); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person) + }: _(RawOrigin::Signed(caller), reason, awesome_person_lookup) retract_tip { let r = T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); + let awesome_person_lookup = T::Lookup::unlookup(awesome_person.clone()); TipsMod::::report_awesome( RawOrigin::Signed(caller.clone()).into(), reason.clone(), - awesome_person.clone() + awesome_person_lookup )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); @@ -117,19 +119,21 @@ benchmarks_instance_pallet! { let t in 1 .. T::Tippers::max_len() as u32; let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + }: _(RawOrigin::Signed(caller), reason, beneficiary_lookup, value) tip { let t in 1 .. T::Tippers::max_len() as u32; let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; let reason_hash = T::Hashing::hash(&reason[..]); @@ -150,11 +154,12 @@ benchmarks_instance_pallet! { // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; @@ -179,18 +184,20 @@ benchmarks_instance_pallet! 
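// The benchmarks above now pass `T::Lookup::unlookup(who)` because the tips
// dispatchables take an `AccountIdLookupOf<T>` (see the `StaticLookup` changes
// in `frame/tips/src/lib.rs` below) instead of a bare `T::AccountId`. A
// self-contained sketch of the round-trip, with a trivial identity lookup
// standing in for the runtime's configured one:
trait StaticLookup {
    type Source;
    type Target;
    fn lookup(s: Self::Source) -> Result<Self::Target, &'static str>;
    fn unlookup(t: Self::Target) -> Self::Source;
}

struct IdentityLookup;

impl StaticLookup for IdentityLookup {
    type Source = u64;
    type Target = u64;
    fn lookup(s: u64) -> Result<u64, &'static str> {
        Ok(s)
    }
    fn unlookup(t: u64) -> u64 {
        t
    }
}

fn main() {
    let beneficiary: u64 = 3;
    // Caller side (benchmarks): wrap the account id into the lookup source...
    let source = IdentityLookup::unlookup(beneficiary);
    // ...dispatchable side: resolve it back before use.
    assert_eq!(IdentityLookup::lookup(source), Ok(beneficiary));
}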
{ // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); - }: _(RawOrigin::Root, hash) + let reject_origin = T::RejectOrigin::successful_origin(); + }: _(reject_origin, hash) impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 71af87b42b55b..a4697284e38a0 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -61,7 +61,7 @@ pub mod migrations; pub mod weights; use sp_runtime::{ - traits::{AccountIdConversion, BadOrigin, Hash, TrailingZeroInput, Zero}, + traits::{AccountIdConversion, BadOrigin, Hash, StaticLookup, TrailingZeroInput, Zero}, Percent, RuntimeDebug, }; use sp_std::prelude::*; @@ -80,6 +80,7 @@ pub use weights::WeightInfo; pub type BalanceOf = pallet_treasury::BalanceOf; pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. @@ -237,9 +238,10 @@ pub mod pallet { pub fn report_awesome( origin: OriginFor, reason: Vec, - who: T::AccountId, + who: AccountIdLookupOf, ) -> DispatchResult { let finder = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; ensure!( reason.len() <= T::MaximumReasonLength::get() as usize, @@ -331,10 +333,11 @@ pub mod pallet { pub fn tip_new( origin: OriginFor, reason: Vec, - who: T::AccountId, + who: AccountIdLookupOf, #[pallet::compact] tip_value: BalanceOf, ) -> DispatchResult { let tipper = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs index 34f7a43ec12de..5e10fa7dd2c6d 100644 --- a/frame/tips/src/migrations/v4.rs +++ b/frame/tips/src/migrations/v4.rs @@ -49,7 +49,7 @@ pub fn migrate::on_chain_storage_version(); @@ -84,7 +84,7 @@ pub fn migrate::StillOpen); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); assert_eq!(last_event(), TipEvent::TipClosing { tip_hash: h }); @@ -340,10 +340,10 @@ fn slash_tip_works() { assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); // can't remove from any origin - assert_noop!(Tips::slash_tip(Origin::signed(0), h.clone()), BadOrigin); + assert_noop!(Tips::slash_tip(Origin::signed(0), h), BadOrigin); // can remove from root. 
- assert_ok!(Tips::slash_tip(Origin::root(), h.clone())); + assert_ok!(Tips::slash_tip(Origin::root(), h)); assert_eq!(last_event(), TipEvent::TipSlashed { tip_hash: h, finder: 0, deposit: 12 }); // tipper slashed @@ -359,11 +359,11 @@ fn retract_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(0), h.clone())); + assert_ok!(Tips::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(Origin::signed(10), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(0), h)); System::set_block_number(2); assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); @@ -371,10 +371,10 @@ fn retract_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(10), h.clone())); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(Origin::signed(0), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(10), h)); System::set_block_number(2); assert_noop!(Tips::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); }); @@ -386,8 +386,8 @@ fn tip_median_calculation_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000000)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 1000000)); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); @@ -400,13 +400,13 @@ fn tip_changing_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Tips::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Tips::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10000)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10000)); + assert_ok!(Tips::tip(Origin::signed(13), h, 0)); + assert_ok!(Tips::tip(Origin::signed(14), h, 0)); + assert_ok!(Tips::tip(Origin::signed(12), h, 1000)); + assert_ok!(Tips::tip(Origin::signed(11), h, 100)); + 
assert_ok!(Tips::tip(Origin::signed(10), h, 10)); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); @@ -595,10 +595,10 @@ fn report_awesome_and_tip_works_second_instance() { assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips1::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips1::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips1::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips1::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + assert_ok!(Tips1::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips1::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips1::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips1::tip(Origin::signed(9), h, 10), BadOrigin); System::set_block_number(2); diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 4979618473fd1..6fd4ccc7478f6 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_tips. @@ -58,59 +58,59 @@ impl WeightInfo for SubstrateWeight { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - (30_669_000 as Weight) + Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - (28_768_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_768_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - (20_385_000 as Weight) + Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - (12_287_000 as Weight) + Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - 
.saturating_add((363_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - (45_656_000 as Weight) + Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - (18_525_000 as Weight) + Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -119,58 +119,58 @@ impl WeightInfo for () { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - (30_669_000 as Weight) + Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - (28_768_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_768_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - (20_385_000 as Weight) + Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(166_000 as 
RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - (12_287_000 as Weight) + Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((363_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - (45_656_000 as Weight) + Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - (18_525_000 as Weight) + Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 51aeeabe99db8..9150f87c7175a 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -26,7 +26,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index 2d4da250212f2..de9772d885529 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -29,7 +29,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" serde = { version = "1.0.136", optional = true } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" sp-storage = { version = "6.0.0", default-features = false, path = "../../../primitives/storage" } diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index 83801c44d3578..08561375247ae 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ 
b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -113,6 +113,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_transaction_payment::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; /// The fungibles instance used to pay for transactions in assets. type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. @@ -122,6 +124,19 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee `actual_fee`, of which `tip` was added to the minimum inclusion fee, + /// has been paid by `who` in an asset `asset_id`. + AssetTxFeePaid { + who: T::AccountId, + actual_fee: BalanceOf, + tip: BalanceOf, + asset_id: Option>, + }, + } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority @@ -213,6 +228,8 @@ where Self::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, + // asset_id for the transaction payment + Option>, ); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { @@ -240,7 +257,7 @@ where len: usize, ) -> Result { let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment)) + Ok((self.tip, who.clone(), initial_payment, self.asset_id)) } fn post_dispatch( @@ -250,7 +267,7 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment)) = pre { + if let Some((tip, who, initial_payment, asset_id)) = pre { match initial_payment { InitialPayment::Native(already_withdrawn) => { pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( @@ -273,6 +290,12 @@ where tip.into(), already_withdrawn.into(), )?; + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee, + tip, + asset_id, + }); }, InitialPayment::Nothing => { // `actual_fee` should be zero here for any signed extrinsic. It would be diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index ad5bc3f22e57f..a296a52b5e840 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, Assets: pallet_assets::{Pallet, Call, Storage, Event}, Authorship: pallet_authorship::{Pallet, Call, Storage}, - AssetTxPayment: pallet_asset_tx_payment::{Pallet}, + AssetTxPayment: pallet_asset_tx_payment::{Pallet, Event}, } ); @@ -59,19 +59,19 @@ const CALL: &::Call = &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! 
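// The `Pre` tuple above grows an `asset_id` slot so that `post_dispatch` can
// name the asset in the new `AssetTxFeePaid` event. A minimal sketch of the
// pattern, threading data from `pre_dispatch` to `post_dispatch` through a
// tuple, with the account, tip and asset types simplified to integers:
type Pre = (u64 /* tip */, u64 /* who */, Option<u32> /* asset_id */);

fn pre_dispatch(who: u64, tip: u64, asset_id: Option<u32>) -> Pre {
    // ...fee withdrawal elided...
    (tip, who, asset_id)
}

fn post_dispatch(pre: Option<Pre>, actual_fee: u64) {
    if let Some((tip, who, asset_id)) = pre {
        // After settling the final fee, emit the equivalent of
        // `Event::AssetTxFeePaid { who, actual_fee, tip, asset_id }`.
        println!("AssetTxFeePaid: who={who} fee={actual_fee} tip={tip} asset={asset_id:?}");
    }
}

fn main() {
    let pre = pre_dispatch(1, 5, Some(7));
    post_dispatch(Some(pre), 42);
}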
{ - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::new()); } pub struct BlockWeights; impl Get for BlockWeights { fn get() -> frame_system::limits::BlockWeights { frame_system::limits::BlockWeights::builder() - .base_block(0) + .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = 1024.into(); + weights.max_total = Weight::from_ref_time(1024).into(); }) .build_or_panic() } @@ -129,7 +129,8 @@ impl WeightToFeeT for WeightToFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight).saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) + Self::Balance::saturated_from(weight.ref_time()) + .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) } } @@ -137,7 +138,7 @@ impl WeightToFeeT for TransactionByteFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) } } @@ -198,6 +199,7 @@ impl HandleCredit for CreditToBlockAuthor { } impl Config for Runtime { + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -207,19 +209,24 @@ impl Config for Runtime { pub struct ExtBuilder { balance_factor: u64, - base_weight: u64, + base_weight: Weight, byte_fee: u64, weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } + Self { + balance_factor: 1, + base_weight: Weight::from_ref_time(0), + byte_fee: 1, + weight_to_fee: 1, + } } } impl ExtBuilder { - pub fn base_weight(mut self, base_weight: u64) -> Self { + pub fn base_weight(mut self, base_weight: Weight) -> Self { self.base_weight = base_weight; self } @@ -282,19 +289,19 @@ fn transaction_payment_in_native_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(5), + &info_from_weight(Weight::from_ref_time(5)), &default_post_info(), len, &Ok(()) @@ -302,15 +309,15 @@ fn transaction_payment_in_native_possible() { assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -324,7 +331,7 @@ fn 
transaction_payment_in_asset_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -350,7 +357,7 @@ fn transaction_payment_in_asset_possible() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -360,7 +367,7 @@ fn transaction_payment_in_asset_possible() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -377,7 +384,7 @@ fn transaction_payment_without_fee() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -403,7 +410,7 @@ fn transaction_payment_without_fee() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -413,7 +420,7 @@ fn transaction_payment_without_fee() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &post_info_from_pays(Pays::No), len, &Ok(()) @@ -430,7 +437,7 @@ fn asset_transaction_payment_with_tip_and_refund() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -458,15 +465,15 @@ fn asset_transaction_payment_with_tip_and_refund() { let fee_with_tip = (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), - &post_info_from_weight(final_weight), + &info_from_weight(Weight::from_ref_time(weight)), + &post_info_from_weight(Weight::from_ref_time(final_weight)), len, &Ok(()) )); @@ -482,7 +489,7 @@ fn payment_from_account_with_only_assets() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -510,7 +517,7 @@ fn payment_from_account_with_only_assets() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, 
&info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset @@ -518,7 +525,7 @@ fn payment_from_account_with_only_assets() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -533,7 +540,7 @@ fn payment_only_with_existing_sufficient_asset() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { let asset_id = 1; @@ -542,7 +549,7 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .is_err()); // create the non-sufficient asset @@ -556,7 +563,7 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .is_err()); }); } @@ -566,7 +573,7 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -610,14 +617,14 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { assert_eq!(Assets::balance(asset_id, caller), balance); } let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -631,7 +638,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -663,7 +670,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment) = ⪯ + let (_tip, _who, initial_payment, _asset_id) = ⪯ let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -688,7 +695,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -712,7 +719,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let len = 1; ChargeAssetTxPayment::::pre_dispatch_unsigned( CALL, - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), len, ) .unwrap(); @@ 
-723,7 +730,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( None, - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &post_info_from_pays(Pays::Yes), len, &Ok(()) diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 31e0972a0d5b5..16c2cc55efefb 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 5a0c70138db24..6944593daa57a 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -31,4 +31,16 @@ sp_api::decl_runtime_apis! { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } + + pub trait TransactionPaymentCallApi + where + Balance: Codec + MaybeDisplay, + Call: Codec, + { + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo; + + /// Query fee details of a given encoded `Call`. + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails; + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 0f5c0321130be..9777d7d240491 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -66,7 +66,8 @@ use frame_support::{ dispatch::DispatchResult, traits::{EstimateCallFee, Get}, weights::{ - DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, WeightToFee, + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, + Weight, WeightToFee, }, }; @@ -189,7 +190,11 @@ where weights.get(DispatchClass::Normal).max_total.unwrap_or(weights.max_block); let current_block_weight = >::block_weight(); let normal_block_weight = - *current_block_weight.get(DispatchClass::Normal).min(&normal_max_weight); + current_block_weight.get(DispatchClass::Normal).min(normal_max_weight); + + // TODO: Handle all weight dimensions + let normal_max_weight = normal_max_weight.ref_time(); + let normal_block_weight = normal_block_weight.ref_time(); let s = S::get(); let v = V::get(); @@ -347,7 +352,7 @@ pub mod pallet { assert!( ::max_value() >= Multiplier::checked_from_integer::( - T::BlockWeights::get().max_block.try_into().unwrap() + T::BlockWeights::get().max_block.ref_time().try_into().unwrap() ) .unwrap(), ); @@ -359,7 +364,7 @@ pub mod pallet { ); // add 1 percent; let addition = target / 100; - if addition == 0 { + if addition == Weight::zero() { // this is most likely because in a test setup we set everything to (). return } @@ -445,6 +450,32 @@ where } } + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. 
+ pub fn query_call_info(call: T::Call, len: u32) -> RuntimeDispatchInfo> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let DispatchInfo { weight, class, .. } = dispatch_info; + + RuntimeDispatchInfo { + weight, + class, + partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), + } + } + + /// Query fee details of a given encoded `Call`. + pub fn query_call_fee_details(call: T::Call, len: u32) -> FeeDetails> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let tip = 0u32.into(); + + Self::compute_fee_details(len, &dispatch_info, tip) + } + /// Compute the final fee value for a particular transaction. pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf where @@ -528,7 +559,7 @@ where } fn length_to_fee(length: u32) -> BalanceOf { - T::LengthToFee::weight_to_fee(&(length as Weight)) + T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as RefTimeWeight)) } fn weight_to_fee(weight: Weight) -> BalanceOf { @@ -609,7 +640,7 @@ where /// and user-included tip. /// /// The priority is based on the amount of `tip` the user is willing to pay per unit of either - /// `weight` or `length`, depending which one is more limitting. For `Operational` extrinsics + /// `weight` or `length`, depending which one is more limiting. For `Operational` extrinsics /// we add a "virtual tip" to the calculations. /// /// The formula should simply be `tip / bounded_{weight|length}`, but since we are using @@ -629,7 +660,11 @@ where let max_block_weight = T::BlockWeights::get().max_block; let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; - let bounded_weight = info.weight.max(1).min(max_block_weight); + // TODO: Take into account all dimensions of weight + let max_block_weight = max_block_weight.ref_time(); + let info_weight = info.weight.ref_time(); + + let bounded_weight = info_weight.max(1).min(max_block_weight); let bounded_length = (len as u64).max(1).min(max_block_length); let max_tx_per_block_weight = max_block_weight / bounded_weight; @@ -810,19 +845,19 @@ mod tests { &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! 
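// The priority change above keeps the old formula but feeds it the scalar
// `ref_time` component (see the `TODO` about handling all weight dimensions).
// A standalone sketch of tip-per-bounded-resource priority, following the
// structure of `get_priority` with invented block limits:
fn priority(tip: u64, info_ref_time: u64, len: u64) -> u64 {
    let max_block_weight: u64 = 1_000_000; // assumed `max_block.ref_time()`
    let max_block_length: u64 = 4_096; // assumed block length limit

    // Clamp both dimensions away from zero and below the block limits.
    let bounded_weight = info_ref_time.max(1).min(max_block_weight);
    let bounded_length = len.max(1).min(max_block_length);

    // How many such transactions fit in a block, in the scarcer dimension.
    let max_tx_per_block =
        (max_block_weight / bounded_weight).min(max_block_length / bounded_length);

    // Priority is the tip scaled by how many of these could share a block.
    tip.saturating_mul(max_tx_per_block)
}

fn main() {
    // A heavier call earns less priority for the same tip.
    assert!(priority(10, 1_000, 10) > priority(10, 100_000, 10));
}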
-		static EXTRINSIC_BASE_WEIGHT: RefCell<u64> = RefCell::new(0);
+		static EXTRINSIC_BASE_WEIGHT: RefCell<Weight> = RefCell::new(Weight::new());
 	}

 	pub struct BlockWeights;
 	impl Get<frame_system::limits::BlockWeights> for BlockWeights {
 		fn get() -> frame_system::limits::BlockWeights {
 			frame_system::limits::BlockWeights::builder()
-				.base_block(0)
+				.base_block(Weight::zero())
 				.for_class(DispatchClass::all(), |weights| {
 					weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into();
 				})
 				.for_class(DispatchClass::non_mandatory(), |weights| {
-					weights.max_total = 1024.into();
+					weights.max_total = Weight::from_ref_time(1024).into();
 				})
 				.build_or_panic()
 		}
@@ -877,7 +912,7 @@ mod tests {
 		type Balance = u64;

 		fn weight_to_fee(weight: &Weight) -> Self::Balance {
-			Self::Balance::saturated_from(*weight)
+			Self::Balance::saturated_from(weight.ref_time())
 				.saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow()))
 		}
 	}
@@ -886,7 +921,7 @@ mod tests {
 		type Balance = u64;

 		fn weight_to_fee(weight: &Weight) -> Self::Balance {
-			Self::Balance::saturated_from(*weight)
+			Self::Balance::saturated_from(weight.ref_time())
 				.saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow()))
 		}
 	}
@@ -921,19 +956,19 @@ mod tests {
 	pub struct ExtBuilder {
 		balance_factor: u64,
-		base_weight: u64,
+		base_weight: Weight,
 		byte_fee: u64,
 		weight_to_fee: u64,
 	}

 	impl Default for ExtBuilder {
 		fn default() -> Self {
-			Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 }
+			Self { balance_factor: 1, base_weight: Weight::zero(), byte_fee: 1, weight_to_fee: 1 }
 		}
 	}

 	impl ExtBuilder {
-		pub fn base_weight(mut self, base_weight: u64) -> Self {
+		pub fn base_weight(mut self, base_weight: Weight) -> Self {
 			self.base_weight = base_weight;
 			self
 		}
@@ -999,43 +1034,43 @@ mod tests {
 	fn signed_extension_transaction_payment_work() {
 		ExtBuilder::default()
 			.balance_factor(10)
-			.base_weight(5)
+			.base_weight(Weight::from_ref_time(5))
 			.build()
 			.execute_with(|| {
 				let len = 10;
 				let pre = ChargeTransactionPayment::<Runtime>::from(0)
-					.pre_dispatch(&1, CALL, &info_from_weight(5), len)
+					.pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len)
 					.unwrap();
 				assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10);

 				assert_ok!(ChargeTransactionPayment::<Runtime>::post_dispatch(
 					Some(pre),
-					&info_from_weight(5),
+					&info_from_weight(Weight::from_ref_time(5)),
 					&default_post_info(),
 					len,
 					&Ok(())
 				));
 				assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10);
-				assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10);
-				assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0);
+				assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 5 + 10);
+				assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 0);

 				FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0);

 				let pre = ChargeTransactionPayment::<Runtime>::from(5 /* tipped */)
-					.pre_dispatch(&2, CALL, &info_from_weight(100), len)
+					.pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len)
 					.unwrap();
 				assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5);

 				assert_ok!(ChargeTransactionPayment::<Runtime>::post_dispatch(
 					Some(pre),
-					&info_from_weight(100),
-					&post_info_from_weight(50),
+					&info_from_weight(Weight::from_ref_time(100)),
+					&post_info_from_weight(Weight::from_ref_time(50)),
 					len,
 					&Ok(())
 				));
 				assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5);
-				assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 10 + 50);
-				assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5);
+				assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 10 + 50);
+
assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5); }); } @@ -1043,22 +1078,22 @@ mod tests { fn signed_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; >::put(Multiplier::saturating_from_rational(3, 2)); let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -1074,13 +1109,14 @@ mod tests { assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( &1, CALL, - &info_from_weight(Weight::max_value()), + &info_from_weight(Weight::MAX), 10 )); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), - (10000 - ::BlockWeights::get().max_block) as u64 + (10000 - + ::BlockWeights::get().max_block.ref_time()) as u64 ); }); } @@ -1088,7 +1124,7 @@ mod tests { #[test] fn signed_extension_allows_free_transactions() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .balance_factor(0) .build() .execute_with(|| { @@ -1099,7 +1135,7 @@ mod tests { // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. let operational_transaction = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::No, }; @@ -1111,8 +1147,11 @@ mod tests { )); // like a InsecureFreeNormal - let free_transaction = - DispatchInfo { weight: 0, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let free_transaction = DispatchInfo { + weight: Weight::from_ref_time(0), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; assert_noop!( ChargeTransactionPayment::::from(0).validate( &1, @@ -1128,7 +1167,7 @@ mod tests { #[test] fn signed_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .balance_factor(10) .build() .execute_with(|| { @@ -1136,8 +1175,10 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(3), len)); + assert_ok!( + ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(3)), len) + ); assert_eq!( Balances::free_balance(1), 100 // original @@ -1162,54 +1203,103 @@ mod tests { let unsigned_xt = TestXt::<_, ()>::new(call, None); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); - ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); + ExtBuilder::default() + .base_weight(Weight::from_ref_time(5)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ + 
assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ - }, - ); + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * - 2 * 3 / 2 - }), - tip: 0, - }, - ); + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); - assert_eq!( - TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); + } + + #[test] + fn query_call_info_and_fee_details_works() { + let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let info = call.get_dispatch_info(); + let encoded_call = call.encode(); + let len = encoded_call.len() as u32; + + ExtBuilder::default() + .base_weight(Weight::from_ref_time(5)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); } #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1219,14 +1309,14 @@ mod tests { // Tip only, no fees works let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::No, }; assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1237,7 +1327,7 @@ mod tests { assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo 
{ - weight: 1000, + weight: Weight::from_ref_time(1000), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1248,7 +1338,7 @@ mod tests { #[test] fn compute_fee_works_with_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1257,7 +1347,7 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1265,7 +1355,7 @@ mod tests { // Everything works together :) let dispatch_info = DispatchInfo { - weight: 123, + weight: Weight::from_ref_time(123), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1280,7 +1370,7 @@ mod tests { #[test] fn compute_fee_works_with_negative_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1290,7 +1380,7 @@ mod tests { // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1298,7 +1388,7 @@ mod tests { // Everything works together. let dispatch_info = DispatchInfo { - weight: 123, + weight: Weight::from_ref_time(123), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1313,14 +1403,14 @@ mod tests { #[test] fn compute_fee_does_not_overflow() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { - weight: Weight::max_value(), + weight: Weight::MAX, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1335,14 +1425,14 @@ mod tests { fn refund_does_not_recreate_account() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); @@ -1352,8 +1442,8 @@ mod tests { assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -1375,19 +1465,19 @@ mod tests { fn actual_weight_higher_than_max_refunds_nothing() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(101), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(101)), len, &Ok(()) )); @@ -1399,14 +1489,17 @@ mod tests { fn zero_transfer_on_free_transaction() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + 
.base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; - let dispatch_info = - DispatchInfo { weight: 100, pays_fee: Pays::No, class: DispatchClass::Normal }; + let dispatch_info = DispatchInfo { + weight: Weight::from_ref_time(100), + pays_fee: Pays::No, + class: DispatchClass::Normal, + }; let user = 69; let pre = ChargeTransactionPayment::::from(0) .pre_dispatch(&user, CALL, &dispatch_info, len) @@ -1435,11 +1528,11 @@ mod tests { fn refund_consistent_with_actual_weight() { ExtBuilder::default() .balance_factor(10) - .base_weight(7) + .base_weight(Weight::from_ref_time(7)) .build() .execute_with(|| { - let info = info_from_weight(100); - let post_info = post_info_from_weight(33); + let info = info_from_weight(Weight::from_ref_time(100)); + let post_info = post_info_from_weight(Weight::from_ref_time(33)); let prev_balance = Balances::free_balance(2); let len = 10; let tip = 5; @@ -1475,8 +1568,11 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1494,7 +1590,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1518,8 +1614,11 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1530,7 +1629,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1549,8 +1648,11 @@ mod tests { let mut priority2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; priority1 = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1559,7 +1661,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1586,10 +1688,10 @@ mod tests { fn post_info_can_change_pays_fee() { ExtBuilder::default() .balance_factor(10) - .base_weight(7) + .base_weight(Weight::from_ref_time(7)) .build() .execute_with(|| { - let info = info_from_weight(100); + let info = info_from_weight(Weight::from_ref_time(100)); let post_info = post_info_from_pays(Pays::No); let prev_balance = Balances::free_balance(2); let len = 10; diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 3faebfed48946..5e915d62a26d4 100644 --- 
a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -135,12 +135,12 @@ mod tests { #[test] fn should_serialize_and_deserialize_properly_with_string() { let info = RuntimeDispatchInfo { - weight: 5, + weight: Weight::from_ref_time(5), class: DispatchClass::Normal, partial_fee: 1_000_000_u64, }; - let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; + let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -152,12 +152,12 @@ mod tests { #[test] fn should_serialize_and_deserialize_properly_large_value() { let info = RuntimeDispatchInfo { - weight: 5, + weight: Weight::from_ref_time(5), class: DispatchClass::Normal, partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index f16b8f029662b..681cd29af8222 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -245,9 +245,11 @@ pub mod pallet { let sender = ensure_signed(origin)?; let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; + let extrinsic_index = + >::extrinsic_index().ok_or(Error::::BadContext)?; + Self::apply_fee(sender, info.size)?; - let extrinsic_index = >::extrinsic_index().unwrap(); sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs index 8825890ae67a2..4f5ce1c4b654d 100644 --- a/frame/transaction-storage/src/tests.rs +++ b/frame/transaction-storage/src/tests.rs @@ -31,11 +31,11 @@ fn discards_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); let proof_provider = || { @@ -74,7 +74,7 @@ fn burns_fee() { Error::::InsufficientFunds, ); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200); @@ -87,7 +87,7 @@ fn checks_proof() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; MAX_DATA_SIZE as usize] )); run_to_block(10, || None); @@ -119,13 +119,13 @@ fn renews_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000] )); let info = BlockTransactions::::get().last().unwrap().clone(); run_to_block(6, || None); assert_ok!(TransactionStorage::::renew( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), 1, // block 0, 
// transaction )); diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index b8bc4890a416e..54d5b0723aad6 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_transaction_storage. @@ -59,11 +59,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -72,9 +72,9 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - (50_978_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(50_978_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -82,9 +82,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - (106_990_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(106_990_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -97,11 +97,11 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -110,9 +110,9 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - (50_978_000 
as Weight)
-			.saturating_add(RocksDbWeight::get().reads(6 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(50_978_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: TransactionStorage ProofChecked (r:1 w:1)
 	// Storage: TransactionStorage StoragePeriod (r:1 w:0)
@@ -120,8 +120,8 @@ impl WeightInfo for () {
 	// Storage: System ParentHash (r:1 w:0)
 	// Storage: TransactionStorage Transactions (r:1 w:0)
 	fn check_proof_max() -> Weight {
-		(106_990_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(106_990_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 }
diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs
index ddb952383370d..b2b670d86f07c 100644
--- a/frame/treasury/src/benchmarking.rs
+++ b/frame/treasury/src/benchmarking.rs
@@ -34,7 +34,7 @@ const SEED: u32 = 0;
 // Create the pre-requisite information needed to create a treasury `propose_spend`.
 fn setup_proposal<T: Config<I>, I: 'static>(
 	u: u32,
-) -> (T::AccountId, BalanceOf<T, I>, <T::Lookup as StaticLookup>::Source) {
+) -> (T::AccountId, BalanceOf<T, I>, AccountIdLookupOf<T>) {
 	let caller = account("caller", u, SEED);
 	let value: BalanceOf<T, I> = T::ProposalBondMinimum::get().saturating_mul(100u32.into());
 	let _ = T::Currency::make_free_balance_be(&caller, value);
@@ -99,7 +99,8 @@ benchmarks_instance_pallet! {
 			beneficiary_lookup
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
-	}: _(RawOrigin::Root, proposal_id)
+		let reject_origin = T::RejectOrigin::successful_origin();
+	}: _(reject_origin, proposal_id)

 	approve_proposal {
 		let p in 0 .. T::MaxApprovals::get() - 1;
@@ -111,7 +112,8 @@ benchmarks_instance_pallet! {
 			beneficiary_lookup
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
-	}: _(RawOrigin::Root, proposal_id)
+		let approve_origin = T::ApproveOrigin::successful_origin();
+	}: _(approve_origin, proposal_id)

 	remove_approval {
 		let (caller, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
@@ -122,7 +124,8 @@ benchmarks_instance_pallet! {
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
 		Treasury::<T, _>::approve_proposal(RawOrigin::Root.into(), proposal_id)?;
-	}: _(RawOrigin::Root, proposal_id)
+		let reject_origin = T::RejectOrigin::successful_origin();
+	}: _(reject_origin, proposal_id)

 	on_initialize_proposals {
 		let p in 0 .. T::MaxApprovals::get();
diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs
index 6730f985b16e0..eecf225beea9b 100644
--- a/frame/treasury/src/lib.rs
+++ b/frame/treasury/src/lib.rs
@@ -93,6 +93,7 @@ pub type PositiveImbalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currenc
 pub type NegativeImbalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currency<
 	<T as frame_system::Config>::AccountId,
 >>::NegativeImbalance;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;

 /// A trait to allow the Treasury Pallet to spend its funds for other purposes.
/// There is an expectation that the implementer of this trait will correctly manage @@ -318,7 +319,7 @@ pub mod pallet { if (n % T::SpendPeriod::get()).is_zero() { Self::spend_funds() } else { - 0 + Weight::zero() } } } @@ -338,7 +339,7 @@ pub mod pallet { pub fn propose_spend( origin: OriginFor, #[pallet::compact] value: BalanceOf, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let proposer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -419,7 +420,7 @@ pub mod pallet { pub fn spend( origin: OriginFor, #[pallet::compact] amount: BalanceOf, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -499,7 +500,7 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. pub fn spend_funds() -> Weight { - let mut total_weight: Weight = Zero::zero(); + let mut total_weight = Weight::new(); let mut budget_remaining = Self::pot(); Self::deposit_event(Event::Spending { budget_remaining }); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 61eafb652427b..bec96daf576e3 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -55,7 +55,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index f6b5414a05652..74e6e9779000e 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_treasury. 
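The treasury weight functions below follow the same mechanical rewrite applied to every autogenerated `weights.rs` in this patch: bare `u64` arithmetic on `Weight` becomes explicit construction from the reference-time component. A condensed before/after sketch, with constants taken from the hunks below; the `T::DbWeight` terms and generics are omitted so the snippet stands alone:

```rust
use frame_support::weights::{RefTimeWeight, Weight};

// Old shape: `Weight` was a plain u64, so literals were cast directly:
//     (10_786_000 as Weight)
//         .saturating_add((110_000 as Weight).saturating_mul(p as Weight))
//
// New shape: `Weight` is an opaque struct whose u64 lives in its `ref_time`
// component, so scalar math goes through dedicated helpers.
fn approve_proposal_weight(p: u64) -> Weight {
	Weight::from_ref_time(10_786_000 as RefTimeWeight)
		// per-approval cost, scaled by the component `p`
		.saturating_add(
			Weight::from_ref_time(110_000 as RefTimeWeight)
				.scalar_saturating_mul(p as RefTimeWeight),
		)
}
```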
@@ -58,51 +58,51 @@ impl WeightInfo for SubstrateWeight { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - (22_063_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_063_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - (26_473_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(26_473_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - (29_955_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_955_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - (10_786_000 as Weight) + Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - (6_647_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_647_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - (25_805_000 as Weight) + Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } @@ -111,50 +111,50 
@@ impl WeightInfo for () { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - (22_063_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_063_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - (26_473_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(26_473_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - (29_955_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_955_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - (10_786_000 as Weight) + Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - (6_647_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_647_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - (25_805_000 as Weight) + Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as 
RefTimeWeight))) } } diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 14a3e2c61809a..3e3148b5b5fc2 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -37,7 +37,7 @@ use crate::Pallet as Uniques; const SEED: u32 = 0; fn create_collection, I: 'static>( -) -> (T::CollectionId, T::AccountId, ::Source) { +) -> (T::CollectionId, T::AccountId, AccountIdLookupOf) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); @@ -52,8 +52,7 @@ fn create_collection, I: 'static>( (collection, caller, caller_lookup) } -fn add_collection_metadata, I: 'static>( -) -> (T::AccountId, ::Source) { +fn add_collection_metadata, I: 'static>() -> (T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -71,7 +70,7 @@ fn add_collection_metadata, I: 'static>( fn mint_item, I: 'static>( index: u16, -) -> (T::ItemId, T::AccountId, ::Source) { +) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().admin; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -90,7 +89,7 @@ fn mint_item, I: 'static>( fn add_item_metadata, I: 'static>( item: T::ItemId, -) -> (T::AccountId, ::Source) { +) -> (T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -109,7 +108,7 @@ fn add_item_metadata, I: 'static>( fn add_item_attribute, I: 'static>( item: T::ItemId, -) -> (BoundedVec, T::AccountId, ::Source) { +) -> (BoundedVec, T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -408,5 +407,41 @@ benchmarks_instance_pallet! { }.into()); } + set_price { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let price = ItemPrice::::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(price), Some(delegate_lookup)) + verify { + assert_last_event::(Event::ItemPriceSet { + collection, + item, + price, + whitelisted_buyer: Some(delegate), + }.into()); + } + + buy_item { + let (collection, seller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let buyer: T::AccountId = account("buyer", 0, SEED); + let buyer_lookup = T::Lookup::unlookup(buyer.clone()); + let price = ItemPrice::::from(0u32); + let origin = SystemOrigin::Signed(seller.clone()).into(); + Uniques::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) + verify { + assert_last_event::(Event::ItemBought { + collection, + item, + price, + seller, + buyer, + }.into()); + } + impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 155fb35ef0999..107214558307f 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -18,7 +18,10 @@ //! 
Various pieces of common functionality.

 use super::*;
-use frame_support::{ensure, traits::Get};
+use frame_support::{
+	ensure,
+	traits::{ExistenceRequirement, Get},
+};
 use sp_runtime::{DispatchError, DispatchResult};

 impl<T: Config<I>, I: 'static> Pallet<T, I> {
@@ -46,6 +49,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 			let origin = details.owner;
 			details.owner = dest;
 			Item::<T, I>::insert(&collection, &item, &details);
+			ItemPriceOf::<T, I>::remove(&collection, &item);

 			Self::deposit_event(Event::Transferred {
 				collection,
@@ -112,6 +116,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 			}
 			#[allow(deprecated)]
 			ItemMetadataOf::<T, I>::remove_prefix(&collection, None);
+			#[allow(deprecated)]
+			ItemPriceOf::<T, I>::remove_prefix(&collection, None);
 			CollectionMetadataOf::<T, I>::remove(&collection);
 			#[allow(deprecated)]
 			Attribute::<T, I>::remove_prefix((&collection,), None);
@@ -196,8 +202,75 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		Item::<T, I>::remove(&collection, &item);
 		Account::<T, I>::remove((&owner, &collection, &item));
+		ItemPriceOf::<T, I>::remove(&collection, &item);

 		Self::deposit_event(Event::Burned { collection, item, owner });
 		Ok(())
 	}
+
+	pub fn do_set_price(
+		collection: T::CollectionId,
+		item: T::ItemId,
+		sender: T::AccountId,
+		price: Option<ItemPrice<T, I>>,
+		whitelisted_buyer: Option<T::AccountId>,
+	) -> DispatchResult {
+		let details = Item::<T, I>::get(&collection, &item).ok_or(Error::<T, I>::UnknownItem)?;
+		ensure!(details.owner == sender, Error::<T, I>::NoPermission);
+
+		if let Some(ref price) = price {
+			ItemPriceOf::<T, I>::insert(&collection, &item, (price, whitelisted_buyer.clone()));
+			Self::deposit_event(Event::ItemPriceSet {
+				collection,
+				item,
+				price: *price,
+				whitelisted_buyer,
+			});
+		} else {
+			ItemPriceOf::<T, I>::remove(&collection, &item);
+			Self::deposit_event(Event::ItemPriceRemoved { collection, item });
+		}
+
+		Ok(())
+	}
+
+	pub fn do_buy_item(
+		collection: T::CollectionId,
+		item: T::ItemId,
+		buyer: T::AccountId,
+		bid_price: ItemPrice<T, I>,
+	) -> DispatchResult {
+		let details = Item::<T, I>::get(&collection, &item).ok_or(Error::<T, I>::UnknownItem)?;
+		ensure!(details.owner != buyer, Error::<T, I>::NoPermission);
+
+		let price_info =
+			ItemPriceOf::<T, I>::get(&collection, &item).ok_or(Error::<T, I>::NotForSale)?;
+
+		ensure!(bid_price >= price_info.0, Error::<T, I>::BidTooLow);
+
+		if let Some(only_buyer) = price_info.1 {
+			ensure!(only_buyer == buyer, Error::<T, I>::NoPermission);
+		}
+
+		T::Currency::transfer(
+			&buyer,
+			&details.owner,
+			price_info.0,
+			ExistenceRequirement::KeepAlive,
+		)?;
+
+		let old_owner = details.owner.clone();
+
+		Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?;
+
+		Self::deposit_event(Event::ItemBought {
+			collection,
+			item,
+			price: price_info.0,
+			seller: old_owner,
+			buyer,
+		});
+
+		Ok(())
+	}
 }
diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs
index 5c7adeac0eff2..70f10ca4f8b39 100644
--- a/frame/uniques/src/lib.rs
+++ b/frame/uniques/src/lib.rs
@@ -24,6 +24,7 @@
 //! * [`System`](../frame_system/index.html)
 //! * [`Support`](../frame_support/index.html)

+#![recursion_limit = "256"]
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
@@ -42,8 +43,11 @@ pub mod migration;
 pub mod weights;

 use codec::{Decode, Encode};
-use frame_support::traits::{
-	tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency,
+use frame_support::{
+	traits::{
+		tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency,
+	},
+	transactional,
 };
 use frame_system::Config as SystemConfig;
 use sp_runtime::{
@@ -56,6 +60,8 @@ pub use pallet::*;
 pub use types::*;
 pub use weights::WeightInfo;

+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -245,6 +251,18 @@ pub mod pallet {
 		OptionQuery,
 	>;

+	#[pallet::storage]
+	/// Price of an asset instance.
+	pub(super) type ItemPriceOf<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+		_,
+		Blake2_128Concat,
+		T::CollectionId,
+		Blake2_128Concat,
+		T::ItemId,
+		(ItemPrice<T, I>, Option<T::AccountId>),
+		OptionQuery,
+	>;
+
 	#[pallet::storage]
 	/// Keeps track of the number of items a collection might have.
 	pub(super) type CollectionMaxSupply<T: Config<I>, I: 'static = ()> =
@@ -341,6 +359,23 @@ pub mod pallet {
 		OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option<T::CollectionId> },
 		/// Max supply has been set for a collection.
 		CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 },
+		/// The price was set for the instance.
+		ItemPriceSet {
+			collection: T::CollectionId,
+			item: T::ItemId,
+			price: ItemPrice<T, I>,
+			whitelisted_buyer: Option<T::AccountId>,
+		},
+		/// The price for the instance was removed.
+		ItemPriceRemoved { collection: T::CollectionId, item: T::ItemId },
+		/// An item was bought.
+		ItemBought {
+			collection: T::CollectionId,
+			item: T::ItemId,
+			price: ItemPrice<T, I>,
+			seller: T::AccountId,
+			buyer: T::AccountId,
+		},
 	}

 	#[pallet::error]
@@ -375,6 +410,12 @@ pub mod pallet {
 		MaxSupplyAlreadySet,
 		/// The provided max supply is less than the amount of items a collection already has.
 		MaxSupplyTooSmall,
+		/// The given item ID is unknown.
+		UnknownItem,
+		/// Item is not for sale.
+		NotForSale,
+		/// The provided bid is too low.
+ BidTooLow, } impl, I: 'static> Pallet { @@ -411,7 +452,7 @@ pub mod pallet { pub fn create( origin: OriginFor, collection: T::CollectionId, - admin: ::Source, + admin: AccountIdLookupOf, ) -> DispatchResult { let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -447,7 +488,7 @@ pub mod pallet { pub fn force_create( origin: OriginFor, collection: T::CollectionId, - owner: ::Source, + owner: AccountIdLookupOf, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -518,7 +559,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - owner: ::Source, + owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -547,7 +588,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - check_owner: Option<::Source>, + check_owner: Option>, ) -> DispatchResult { let origin = ensure_signed(origin)?; let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; @@ -583,7 +624,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - dest: ::Source, + dest: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; @@ -795,7 +836,7 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, collection: T::CollectionId, - owner: ::Source, + owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -843,9 +884,9 @@ pub mod pallet { pub fn set_team( origin: OriginFor, collection: T::CollectionId, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let issuer = T::Lookup::lookup(issuer)?; @@ -881,7 +922,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - delegate: ::Source, + delegate: AccountIdLookupOf, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -934,7 +975,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - maybe_check_delegate: Option<::Source>, + maybe_check_delegate: Option>, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -985,10 +1026,10 @@ pub mod pallet { pub fn force_item_status( origin: OriginFor, collection: T::CollectionId, - owner: ::Source, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + owner: AccountIdLookupOf, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, free_holding: bool, is_frozen: bool, ) -> DispatchResult { @@ -1408,5 +1449,50 @@ pub mod pallet { Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); Ok(()) } + + /// Set (or reset) the price for an item. + /// + /// Origin must be Signed and must be the owner of the asset `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item to set the price for. + /// - `price`: The price for the item. Pass `None`, to reset the price. + /// - `buyer`: Restricts the buy operation to a specific account. + /// + /// Emits `ItemPriceSet` on success if the price is not `None`. + /// Emits `ItemPriceRemoved` on success if the price is `None`. 
+ #[pallet::weight(T::WeightInfo::set_price())] + pub fn set_price( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + price: Option>, + whitelisted_buyer: Option>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; + Self::do_set_price(collection, item, origin, price, whitelisted_buyer) + } + + /// Allows to buy an item if it's up for sale. + /// + /// Origin must be Signed and must not be the owner of the `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item the sender wants to buy. + /// - `bid_price`: The price the sender is willing to pay. + /// + /// Emits `ItemBought` on success. + #[pallet::weight(T::WeightInfo::buy_item())] + #[transactional] + pub fn buy_item( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + bid_price: ItemPrice, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_buy_item(collection, item, origin, bid_price) + } } } diff --git a/frame/uniques/src/migration.rs b/frame/uniques/src/migration.rs index d301f0a3d1eb1..8a2a0ef808d90 100644 --- a/frame/uniques/src/migration.rs +++ b/frame/uniques/src/migration.rs @@ -17,10 +17,7 @@ //! Various pieces of common functionality. use super::*; -use frame_support::{ - traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}, - weights::Weight, -}; +use frame_support::traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}; /// Migrate the pallet storage to v1. pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( @@ -45,7 +42,7 @@ pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfo on_chain_storage_version, ); // calculate and return migration weights - T::DbWeight::get().reads_writes(count as Weight + 1, count as Weight + 1) + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) } else { log::warn!( target: "runtime::uniques", diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index ef28a2143e84a..8b1d00d7ba0c7 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for Uniques pallet. 
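The new tests below exercise the frozen-collection and frozen-item checks by building an outer runtime call and dispatching it (hence the `dispatch::Dispatchable` import added just below), rather than invoking the pallet function directly. A condensed sketch of that pattern, with the `Test`, `mock::Call`, and `Origin` names assumed from the pallet's mock runtime:

```rust
use frame_support::{assert_noop, dispatch::Dispatchable};

fn dispatch_buy_item_sketch() {
	// Wrap the pallet call in the mock runtime's outer `Call` enum...
	let buy_item_call = mock::Call::Uniques(crate::Call::<Test>::buy_item {
		collection: 0,
		item: 3,
		bid_price: 20,
	});
	// ...then dispatch it with a signed origin, as an extrinsic would be,
	// asserting the expected error without committing storage changes.
	assert_noop!(buy_item_call.dispatch(Origin::signed(2)), Error::<Test>::Frozen);
}
```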
 use crate::{mock::*, Event, *};
-use frame_support::{assert_noop, assert_ok, traits::Currency};
+use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::Currency};
 use pallet_balances::Error as BalancesError;
 use sp_std::prelude::*;
@@ -694,3 +694,179 @@ fn max_supply_should_work() {
 		assert!(!CollectionMaxSupply::<Test>::contains_key(collection_id));
 	});
 }
+
+#[test]
+fn set_price_should_work() {
+	new_test_ext().execute_with(|| {
+		let user_id = 1;
+		let collection_id = 0;
+		let item_1 = 1;
+		let item_2 = 2;
+
+		assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true));
+
+		assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id));
+		assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id));
+
+		assert_ok!(Uniques::set_price(
+			Origin::signed(user_id),
+			collection_id,
+			item_1,
+			Some(1),
+			None,
+		));
+
+		assert_ok!(Uniques::set_price(
+			Origin::signed(user_id),
+			collection_id,
+			item_2,
+			Some(2),
+			Some(3)
+		));
+
+		let item = ItemPriceOf::<Test>::get(collection_id, item_1).unwrap();
+		assert_eq!(item.0, 1);
+		assert_eq!(item.1, None);
+
+		let item = ItemPriceOf::<Test>::get(collection_id, item_2).unwrap();
+		assert_eq!(item.0, 2);
+		assert_eq!(item.1, Some(3));
+
+		assert!(events().contains(&Event::<Test>::ItemPriceSet {
+			collection: collection_id,
+			item: item_1,
+			price: 1,
+			whitelisted_buyer: None,
+		}));
+
+		// validate we can unset the price
+		assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None));
+		assert!(events().contains(&Event::<Test>::ItemPriceRemoved {
+			collection: collection_id,
+			item: item_2
+		}));
+		assert!(!ItemPriceOf::<Test>::contains_key(collection_id, item_2));
+	});
+}
+
+#[test]
+fn buy_item_should_work() {
+	new_test_ext().execute_with(|| {
+		let user_1 = 1;
+		let user_2 = 2;
+		let user_3 = 3;
+		let collection_id = 0;
+		let item_1 = 1;
+		let item_2 = 2;
+		let item_3 = 3;
+		let price_1 = 20;
+		let price_2 = 30;
+		let initial_balance = 100;
+
+		Balances::make_free_balance_be(&user_1, initial_balance);
+		Balances::make_free_balance_be(&user_2, initial_balance);
+		Balances::make_free_balance_be(&user_3, initial_balance);
+
+		assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true));
+
+		assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1));
+		assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1));
+		assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1));
+
+		assert_ok!(Uniques::set_price(
+			Origin::signed(user_1),
+			collection_id,
+			item_1,
+			Some(price_1),
+			None,
+		));
+
+		assert_ok!(Uniques::set_price(
+			Origin::signed(user_1),
+			collection_id,
+			item_2,
+			Some(price_2),
+			Some(user_3),
+		));
+
+		// can't buy for less
+		assert_noop!(
+			Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, 1),
+			Error::<Test>::BidTooLow
+		);
+
+		// pass the higher price to validate it will still deduct correctly
+		assert_ok!(Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,));
+
+		// validate the new owner & balances
+		let item = Item::<Test>::get(collection_id, item_1).unwrap();
+		assert_eq!(item.owner, user_2);
+		assert_eq!(Balances::total_balance(&user_1), initial_balance + price_1);
+		assert_eq!(Balances::total_balance(&user_2), initial_balance - price_1);
+
+		// can't buy from yourself
+		assert_noop!(
+			Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2),
+			Error::<Test>::NoPermission
+		);
+
+		// can't buy when the item is listed for a specific buyer
+		assert_noop!(
+			Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2),
+			Error::<Test>::NoPermission
+		);
+
+		// can buy when I'm a whitelisted buyer
+		assert_ok!(Uniques::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,));
+
+		assert!(events().contains(&Event::<Test>::ItemBought {
+			collection: collection_id,
+			item: item_2,
+			price: price_2,
+			seller: user_1,
+			buyer: user_3,
+		}));
+
+		// ensure we reset the buyer field
+		assert!(!ItemPriceOf::<Test>::contains_key(collection_id, item_2));
+
+		// can't buy when item is not for sale
+		assert_noop!(
+			Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2),
+			Error::<Test>::NotForSale
+		);
+
+		// ensure we can't buy an item when the collection or an item is frozen
+		{
+			assert_ok!(Uniques::set_price(
+				Origin::signed(user_1),
+				collection_id,
+				item_3,
+				Some(price_1),
+				None,
+			));
+
+			// freeze collection
+			assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id));
+
+			let buy_item_call = mock::Call::Uniques(crate::Call::<Test>::buy_item {
+				collection: collection_id,
+				item: item_3,
+				bid_price: price_1,
+			});
+			assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::<Test>::Frozen);
+
+			assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id));
+
+			// freeze item
+			assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3));
+
+			let buy_item_call = mock::Call::Uniques(crate::Call::<Test>::buy_item {
+				collection: collection_id,
+				item: item_3,
+				bid_price: price_1,
+			});
+			assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::<Test>::Frozen);
+		}
+	});
+}
diff --git a/frame/uniques/src/types.rs b/frame/uniques/src/types.rs
index d7706fd7e3a58..98e056163d28d 100644
--- a/frame/uniques/src/types.rs
+++ b/frame/uniques/src/types.rs
@@ -30,6 +30,8 @@ pub(super) type CollectionDetailsFor<T, I> =
 	CollectionDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
 pub(super) type ItemDetailsFor<T, I> =
 	ItemDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
+pub(super) type ItemPrice<T, I = ()> =
+	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;

 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
 pub struct CollectionDetails<AccountId, DepositBalance> {
diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs
index d885077a8dee9..4ed01e463cc86 100644
--- a/frame/uniques/src/weights.rs
+++ b/frame/uniques/src/weights.rs
@@ -18,22 +18,22 @@
 //! Autogenerated weights for pallet_uniques
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
+//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz`
 //!
diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs
index d885077a8dee9..4ed01e463cc86 100644
--- a/frame/uniques/src/weights.rs
+++ b/frame/uniques/src/weights.rs
@@ -18,22 +18,22 @@
 //! Autogenerated weights for pallet_uniques
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
+//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz`
 //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
 
 // Executed Command:
 // target/production/substrate
 // benchmark
 // pallet
-// --chain=dev
 // --steps=50
 // --repeat=20
-// --pallet=pallet_uniques
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
+// --pallet=pallet_uniques
+// --chain=dev
 // --output=./frame/uniques/src/weights.rs
 // --template=./.maintain/frame-weight-template.hbs
@@ -41,7 +41,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_uniques.
@@ -70,6 +70,8 @@ pub trait WeightInfo {
 	fn cancel_approval() -> Weight;
 	fn set_accept_ownership() -> Weight;
 	fn set_collection_max_supply() -> Weight;
+	fn set_price() -> Weight;
+	fn buy_item() -> Weight;
 }
 
 /// Weights for pallet_uniques using the Substrate node and recommended hardware.
@@ -78,16 +80,16 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Uniques Class (r:1 w:1)
 	// Storage: Uniques ClassAccount (r:0 w:1)
 	fn create() -> Weight {
-		(27_715_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(33_075_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Uniques Class (r:1 w:1)
 	// Storage: Uniques ClassAccount (r:0 w:1)
 	fn force_create() -> Weight {
-		(16_929_000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+		Weight::from_ref_time(19_528_000 as RefTimeWeight)
+			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Uniques Class (r:1 w:1)
 	// Storage: Uniques Asset (r:1 w:0)
@@ -101,174 +103,192 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `m` is `[0, 1000]`.
 	/// The range of component `a` is `[0, 1000]`.
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 16_000 - .saturating_add((10_481_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 16_000 - .saturating_add((1_762_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 16_000 - .saturating_add((1_590_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_577_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (36_124_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_960_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (27_225_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_025_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 14_000 - .saturating_add((12_407_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 24_000 + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (21_452_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (22_155_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (16_897_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (16_657_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_057_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (17_253_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn 
force_item_status() -> Weight { - (20_010_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_159_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_520_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_598_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (33_387_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_208_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (32_619_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_028_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (22_263_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 
w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (22_910_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_098_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (20_716_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(24_076_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (19_380_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_035_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + Weight::from_ref_time(22_534_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques ItemPriceOf (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Account (r:0 w:2) + fn buy_item() -> Weight { + Weight::from_ref_time(45_272_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } } @@ -277,16 +297,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (27_715_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_075_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (16_929_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_528_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -300,173 +320,191 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 16_000 - .saturating_add((10_481_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 16_000 - .saturating_add((1_762_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 16_000 - .saturating_add((1_590_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_577_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (36_124_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_960_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (27_225_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_025_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 14_000 - .saturating_add((12_407_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 24_000 + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (21_452_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (22_155_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (16_897_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (16_657_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_057_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (17_253_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 
w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (20_010_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_159_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_520_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_598_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (33_387_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_208_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (32_619_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_028_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (22_263_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as 
RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Uniques Class (r:1 w:0)
 	// Storage: Uniques Asset (r:1 w:1)
 	fn cancel_approval() -> Weight {
-		(22_910_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(26_098_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Uniques OwnershipAcceptance (r:1 w:1)
 	fn set_accept_ownership() -> Weight {
-		(20_716_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(24_076_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
 	}
 	// Storage: Uniques CollectionMaxSupply (r:1 w:1)
 	// Storage: Uniques Class (r:1 w:0)
 	fn set_collection_max_supply() -> Weight {
-		(19_380_000 as Weight)
-			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+		Weight::from_ref_time(22_035_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+	}
+	// Storage: Uniques Asset (r:1 w:0)
+	// Storage: Uniques ItemPriceOf (r:0 w:1)
+	fn set_price() -> Weight {
+		Weight::from_ref_time(22_534_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
+	}
+	// Storage: Uniques Asset (r:1 w:1)
+	// Storage: Uniques ItemPriceOf (r:1 w:1)
+	// Storage: Uniques Class (r:1 w:0)
+	// Storage: Uniques Account (r:0 w:2)
+	fn buy_item() -> Weight {
+		Weight::from_ref_time(45_272_000 as RefTimeWeight)
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
 	}
 }
diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs
index 0aae2615702dd..3df12d69e84eb 100644
--- a/frame/utility/src/lib.rs
+++ b/frame/utility/src/lib.rs
@@ -185,7 +185,7 @@ pub mod pallet {
 			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
 			let dispatch_weight = dispatch_infos.iter()
 				.map(|di| di.weight)
-				.fold(0, |total: Weight, weight: Weight| total.saturating_add(weight))
+				.fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight))
 				.saturating_add(T::WeightInfo::batch(calls.len() as u32));
 			let dispatch_class = {
 				let all_operational = dispatch_infos.iter()
@@ -208,7 +208,7 @@ pub mod pallet {
 			ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
 
 			// Track the actual weight of each of the batch calls.
-			let mut weight: Weight = 0;
+			let mut weight = Weight::new();
 			for (index, call) in calls.into_iter().enumerate() {
 				let info = call.get_dispatch_info();
 				// If origin is root, don't apply any dispatch filters; root can call anything.
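Review note: the utility hunks around this point are the mechanical core of the `Weight` migration: `Weight` stops being a bare `u64`, so accumulators can no longer start from the literal `0`, and bare `+` gives way to explicit saturating calls. A sketch of the before/after, assuming only the constructors this diff itself uses (`Weight::new()`, `Weight::zero()`, `saturating_add`):

    // Hypothetical list of per-call weights, for illustration only.
    let weights: Vec<Weight> = Vec::new();

    // Before: `Weight` was u64, so `fold(0, ...)` type-checked.
    // After: start from an explicit zero value; the combinator is unchanged.
    let total = weights
        .iter()
        .fold(Weight::zero(), |acc: Weight, w: &Weight| acc.saturating_add(*w));

Worth flagging to the author: `batch`/`batch_all` seed their folds with `Weight::new()` while `force_batch` uses `Weight::zero()`. Both denote the zero weight, but one consistent spelling would read better.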
@@ -253,9 +253,9 @@ pub mod pallet {
 			let dispatch_info = call.get_dispatch_info();
 			(
 				T::WeightInfo::as_derivative()
-					.saturating_add(dispatch_info.weight)
 					// AccountData for inner call origin accountdata.
-					.saturating_add(T::DbWeight::get().reads_writes(1, 1)),
+					.saturating_add(T::DbWeight::get().reads_writes(1, 1))
+					.saturating_add(dispatch_info.weight),
 				dispatch_info.class,
 			)
 		})]
@@ -301,7 +301,7 @@ pub mod pallet {
 			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
 			let dispatch_weight = dispatch_infos.iter()
 				.map(|di| di.weight)
-				.fold(0, |total: Weight, weight: Weight| total.saturating_add(weight))
+				.fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight))
 				.saturating_add(T::WeightInfo::batch_all(calls.len() as u32));
 			let dispatch_class = {
 				let all_operational = dispatch_infos.iter()
@@ -324,7 +324,7 @@ pub mod pallet {
 			ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
 
 			// Track the actual weight of each of the batch calls.
-			let mut weight: Weight = 0;
+			let mut weight = Weight::new();
 			for (index, call) in calls.into_iter().enumerate() {
 				let info = call.get_dispatch_info();
 				// If origin is root, bypass any dispatch filter; root can call anything.
@@ -352,7 +352,7 @@ pub mod pallet {
 			}
 			Self::deposit_event(Event::BatchCompleted);
 			let base_weight = T::WeightInfo::batch_all(calls_len as u32);
-			Ok(Some(base_weight + weight).into())
+			Ok(Some(base_weight.saturating_add(weight)).into())
 		}
 
 		/// Dispatches a function call with a provided origin.
@@ -406,7 +406,7 @@ pub mod pallet {
 			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
 			let dispatch_weight = dispatch_infos.iter()
 				.map(|di| di.weight)
-				.fold(0, |total: Weight, weight: Weight| total.saturating_add(weight))
+				.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
 				.saturating_add(T::WeightInfo::force_batch(calls.len() as u32));
 			let dispatch_class = {
 				let all_operational = dispatch_infos.iter()
@@ -429,7 +429,7 @@ pub mod pallet {
 			ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
 
 			// Track the actual weight of each of the batch calls.
-			let mut weight: Weight = 0;
+			let mut weight = Weight::new();
 			// Track failed dispatch occur.
 			let mut has_error: bool = false;
 			for call in calls.into_iter() {
@@ -455,7 +455,7 @@ pub mod pallet {
 				Self::deposit_event(Event::BatchCompleted);
 			}
 			let base_weight = T::WeightInfo::batch(calls_len as u32);
-			Ok(Some(base_weight + weight).into())
+			Ok(Some(base_weight.saturating_add(weight)).into())
 		}
 	}
 }
diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs
index 6368473ac8708..e3b16b5244fc0 100644
--- a/frame/utility/src/tests.rs
+++ b/frame/utility/src/tests.rs
@@ -99,7 +99,7 @@ frame_support::construct_runtime!(
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(Weight::max_value());
+		frame_system::limits::BlockWeights::simple_max(Weight::MAX);
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = TestBaseCallFilter;
@@ -191,7 +191,7 @@ fn call_transfer(dest: u64, value: u64) -> Call {
 	Call::Balances(BalancesCall::transfer { dest, value })
 }
 
-fn call_foobar(err: bool, start_weight: u64, end_weight: Option<u64>) -> Call {
+fn call_foobar(err: bool, start_weight: Weight, end_weight: Option<Weight>) -> Call {
 	Call::Example(ExampleCall::foobar { err, start_weight, end_weight })
 }
 
@@ -213,8 +213,8 @@ fn as_derivative_works() {
 #[test]
 fn as_derivative_handles_weight_refund() {
 	new_test_ext().execute_with(|| {
-		let start_weight = 100;
-		let end_weight = 75;
+		let start_weight = Weight::from_ref_time(100);
+		let end_weight = Weight::from_ref_time(75);
 		let diff = start_weight - end_weight;
 
 		// Full weight when ok
@@ -364,24 +364,24 @@ fn batch_weight_calculation_doesnt_overflow() {
 	use sp_runtime::Perbill;
 	new_test_ext().execute_with(|| {
 		let big_call = Call::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) });
-		assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2);
+		assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2);
 
 		// 3 * 50% saturates to 100%
 		let batch_call = Call::Utility(crate::Call::batch {
 			calls: vec![big_call.clone(), big_call.clone(), big_call.clone()],
 		});
-		assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value());
+		assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX);
 	});
 }
 
 #[test]
 fn batch_handles_weight_refund() {
 	new_test_ext().execute_with(|| {
-		let start_weight = 100;
-		let end_weight = 75;
+		let start_weight = Weight::from_ref_time(100);
+		let end_weight = Weight::from_ref_time(75);
 		let diff = start_weight - end_weight;
-		let batch_len: Weight = 4;
+		let batch_len = 4;
 
 		// Full weight when ok
 		let inner_call = call_foobar(false, start_weight, None);
@@ -420,7 +420,7 @@ fn batch_handles_weight_refund() {
 		let good_call = call_foobar(false, start_weight, Some(end_weight));
 		let bad_call = call_foobar(true, start_weight, Some(end_weight));
 		let batch_calls = vec![good_call, bad_call];
-		let batch_len = batch_calls.len() as Weight;
+		let batch_len = Weight::from_ref_time(batch_calls.len() as u64);
 		let call = Call::Utility(UtilityCall::batch { calls: batch_calls });
 		let info = call.get_dispatch_info();
 		let result = call.dispatch(Origin::signed(1));
@@ -494,10 +494,10 @@ fn batch_all_revert() {
 #[test]
 fn batch_all_handles_weight_refund() {
 	new_test_ext().execute_with(|| {
-		let start_weight = 100;
-		let end_weight = 75;
+		let start_weight = Weight::from_ref_time(100);
+		let end_weight = Weight::from_ref_time(75);
 		let diff = start_weight - end_weight;
-		let batch_len: Weight = 4;
+		let batch_len = 4;
 
 		// Full weight when ok
 		let inner_call = call_foobar(false, start_weight, None);
@@ -533,7 +533,7 @@ fn batch_all_handles_weight_refund() {
 		let good_call = call_foobar(false, start_weight, Some(end_weight));
 		let bad_call = call_foobar(true, start_weight, Some(end_weight));
 		let batch_calls = vec![good_call, bad_call];
-		let batch_len = batch_calls.len() as Weight;
+		let batch_len = Weight::from_ref_time(batch_calls.len() as u64);
 		let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls });
 		let info = call.get_dispatch_info();
 		let result = call.dispatch(Origin::signed(1));
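Review note: the test updates above follow a single rule: a literal that used to be a weight becomes `Weight::from_ref_time(n)`, while a count that merely scales a weight (such as the first `batch_len`) stays an integer. Condensed, using only operations the diff itself relies on:

    let start_weight = Weight::from_ref_time(100); // a weight value
    let end_weight = Weight::from_ref_time(75);    // a weight value
    let diff = start_weight - end_weight;          // Weight - Weight still works
    let batch_len: u64 = 4;                        // plain count, no longer typed as Weight

`Weight::max_value()` likewise becomes the associated constant `Weight::MAX`, as in the two `fill_block` assertions above.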
@@ -616,7 +616,7 @@ fn force_batch_works() {
 			Origin::signed(1),
 			vec![
 				call_transfer(2, 5),
-				call_foobar(true, 75, None),
+				call_foobar(true, Weight::from_ref_time(75), None),
 				call_transfer(2, 10),
 				call_transfer(2, 5),
 			]
diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs
index 3660a54fb6a8f..0f0d171d8d4ee 100644
--- a/frame/utility/src/weights.rs
+++ b/frame/utility/src/weights.rs
@@ -41,7 +41,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_utility.
@@ -58,27 +58,27 @@ pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `c` is `[0, 1000]`.
 	fn batch(c: u32, ) -> Weight {
-		(23_113_000 as Weight)
+		Weight::from_ref_time(23_113_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((2_701_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 	fn as_derivative() -> Weight {
-		(4_182_000 as Weight)
+		Weight::from_ref_time(4_182_000 as RefTimeWeight)
 	}
 	/// The range of component `c` is `[0, 1000]`.
 	fn batch_all(c: u32, ) -> Weight {
-		(18_682_000 as Weight)
+		Weight::from_ref_time(18_682_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((2_794_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 	fn dispatch_as() -> Weight {
-		(12_049_000 as Weight)
+		Weight::from_ref_time(12_049_000 as RefTimeWeight)
 	}
 	/// The range of component `c` is `[0, 1000]`.
 	fn force_batch(c: u32, ) -> Weight {
-		(19_136_000 as Weight)
+		Weight::from_ref_time(19_136_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((2_697_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 }
 
@@ -86,26 +86,26 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 impl WeightInfo for () {
 	/// The range of component `c` is `[0, 1000]`.
 	fn batch(c: u32, ) -> Weight {
-		(23_113_000 as Weight)
+		Weight::from_ref_time(23_113_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((2_701_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 	fn as_derivative() -> Weight {
-		(4_182_000 as Weight)
+		Weight::from_ref_time(4_182_000 as RefTimeWeight)
 	}
 	/// The range of component `c` is `[0, 1000]`.
 	fn batch_all(c: u32, ) -> Weight {
-		(18_682_000 as Weight)
+		Weight::from_ref_time(18_682_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((2_794_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 	fn dispatch_as() -> Weight {
-		(12_049_000 as Weight)
+		Weight::from_ref_time(12_049_000 as RefTimeWeight)
 	}
 	/// The range of component `c` is `[0, 1000]`.
 	fn force_batch(c: u32, ) -> Weight {
-		(19_136_000 as Weight)
+		Weight::from_ref_time(19_136_000 as RefTimeWeight)
 			// Standard Error: 2_000
-			.saturating_add((2_697_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight))
 	}
 }
diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs
index 2b8150e995240..dde5fe3ac7561 100644
--- a/frame/vesting/src/benchmarking.rs
+++ b/frame/vesting/src/benchmarking.rs
@@ -42,7 +42,7 @@ fn add_locks<T: Config>(who: &T::AccountId, n: u8) {
 }
 
 fn add_vesting_schedules<T: Config>(
-	target: <T::Lookup as StaticLookup>::Source,
+	target: AccountIdLookupOf<T>,
 	n: u32,
 ) -> Result<BalanceOf<T>, &'static str> {
 	let min_transfer = T::MinVestedTransfer::get();
@@ -52,7 +52,7 @@ fn add_vesting_schedules<T: Config>(
 	let starting_block = 1u32;
 
 	let source: T::AccountId = account("source", 0, SEED);
-	let source_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(source.clone());
+	let source_lookup = T::Lookup::unlookup(source.clone());
 	T::Currency::make_free_balance_be(&source, BalanceOf::<T>::max_value());
 
 	System::<T>::set_block_number(T::BlockNumber::zero());
@@ -81,7 +81,7 @@ benchmarks! {
 		let s in 1 .. T::MAX_VESTING_SCHEDULES;
 
 		let caller: T::AccountId = whitelisted_caller();
-		let caller_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(caller.clone());
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance());
 
 		add_locks::<T>(&caller, l as u8);
@@ -109,7 +109,7 @@ benchmarks! {
 		let s in 1 .. T::MAX_VESTING_SCHEDULES;
 
 		let caller: T::AccountId = whitelisted_caller();
-		let caller_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(caller.clone());
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance());
 
 		add_locks::<T>(&caller, l as u8);
@@ -137,7 +137,7 @@ benchmarks! {
 		let s in 1 .. T::MAX_VESTING_SCHEDULES;
 
 		let other: T::AccountId = account("other", 0, SEED);
-		let other_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(other.clone());
+		let other_lookup = T::Lookup::unlookup(other.clone());
 
 		add_locks::<T>(&other, l as u8);
 		let expected_balance = add_vesting_schedules::<T>(other_lookup.clone(), s)?;
@@ -166,7 +166,7 @@ benchmarks! {
 		let s in 1 .. T::MAX_VESTING_SCHEDULES;
 
 		let other: T::AccountId = account("other", 0, SEED);
-		let other_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(other.clone());
+		let other_lookup = T::Lookup::unlookup(other.clone());
 
 		add_locks::<T>(&other, l as u8);
 		add_vesting_schedules::<T>(other_lookup.clone(), s)?;
@@ -198,7 +198,7 @@ benchmarks! {
 		T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
 		let target: T::AccountId = account("target", 0, SEED);
-		let target_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(target.clone());
+		let target_lookup = T::Lookup::unlookup(target.clone());
 		// Give target existing locks
 		add_locks::<T>(&target, l as u8);
 		// Add one vesting schedules.
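Review note: every vesting benchmark hunk makes the same two-part change: the explicit `<T::Lookup as StaticLookup>::Source` annotation disappears (the type is now inferred from `T::Lookup::unlookup`), and signatures use the pallet's new `AccountIdLookupOf<T>` alias. The round trip, as a sketch generic over any runtime (`StaticLookup` is from `sp_runtime::traits`):

    use sp_runtime::traits::StaticLookup;

    fn round_trip<T: frame_system::Config>(who: T::AccountId) -> Result<T::AccountId, ()> {
        // AccountId -> lookup source: what extrinsics accept as `target`.
        let source = T::Lookup::unlookup(who);
        // Lookup source -> AccountId: what the pallet resolves internally.
        T::Lookup::lookup(source).map_err(|_| ())
    }

`unlookup` is infallible, which is why the benchmarks can drop the annotation entirely; `lookup` can fail, which is why the dispatchables keep the `?` on it.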
@@ -232,11 +232,11 @@ benchmarks! {
 		let s in 0 .. T::MAX_VESTING_SCHEDULES - 1;
 
 		let source: T::AccountId = account("source", 0, SEED);
-		let source_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(source.clone());
+		let source_lookup = T::Lookup::unlookup(source.clone());
 		T::Currency::make_free_balance_be(&source, BalanceOf::<T>::max_value());
 
 		let target: T::AccountId = account("target", 0, SEED);
-		let target_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(target.clone());
+		let target_lookup = T::Lookup::unlookup(target.clone());
 		// Give target existing locks
 		add_locks::<T>(&target, l as u8);
 		// Add one less than max vesting schedules
@@ -270,7 +270,7 @@ benchmarks! {
 		let s in 2 .. T::MAX_VESTING_SCHEDULES;
 
 		let caller: T::AccountId = account("caller", 0, SEED);
-		let caller_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(caller.clone());
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		// Give target existing locks.
 		add_locks::<T>(&caller, l as u8);
 		// Add max vesting schedules.
@@ -320,7 +320,7 @@ benchmarks! {
 		let test_dest: T::AccountId = account("test_dest", 0, SEED);
 
 		let caller: T::AccountId = account("caller", 0, SEED);
-		let caller_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(caller.clone());
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		// Give target other locks.
 		add_locks::<T>(&caller, l as u8);
 		// Add max vesting schedules.
diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs
index 9fb7eb8037916..8ac625e775e4f 100644
--- a/frame/vesting/src/lib.rs
+++ b/frame/vesting/src/lib.rs
@@ -84,6 +84,7 @@ type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 type MaxLocksOf<T> =
 	<<T as Config>::Currency as LockableCurrency<<T as frame_system::Config>::AccountId>>::MaxLocks;
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
 const VESTING_ID: LockIdentifier = *b"vesting ";
 
@@ -321,10 +322,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES)
 			.max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES))
 		)]
-		pub fn vest_other(
-			origin: OriginFor<T>,
-			target: <T::Lookup as StaticLookup>::Source,
-		) -> DispatchResult {
+		pub fn vest_other(origin: OriginFor<T>, target: AccountIdLookupOf<T>) -> DispatchResult {
 			ensure_signed(origin)?;
 			let who = T::Lookup::lookup(target)?;
 			Self::do_vest(who)
@@ -352,7 +350,7 @@ pub mod pallet {
 		)]
 		pub fn vested_transfer(
 			origin: OriginFor<T>,
-			target: <T::Lookup as StaticLookup>::Source,
+			target: AccountIdLookupOf<T>,
 			schedule: VestingInfo<BalanceOf<T>, T::BlockNumber>,
 		) -> DispatchResult {
 			let transactor = ensure_signed(origin)?;
@@ -383,8 +381,8 @@ pub mod pallet {
 		)]
 		pub fn force_vested_transfer(
 			origin: OriginFor<T>,
-			source: <T::Lookup as StaticLookup>::Source,
-			target: <T::Lookup as StaticLookup>::Source,
+			source: AccountIdLookupOf<T>,
+			target: AccountIdLookupOf<T>,
 			schedule: VestingInfo<BalanceOf<T>, T::BlockNumber>,
 		) -> DispatchResult {
 			ensure_root(origin)?;
@@ -494,8 +492,8 @@ impl<T: Config> Pallet<T> {
 	// Execute a vested transfer from `source` to `target` with the given `schedule`.
 	fn do_vested_transfer(
-		source: <T::Lookup as StaticLookup>::Source,
-		target: <T::Lookup as StaticLookup>::Source,
+		source: AccountIdLookupOf<T>,
+		target: AccountIdLookupOf<T>,
 		schedule: VestingInfo<BalanceOf<T>, T::BlockNumber>,
 	) -> DispatchResult {
 		// Validate user inputs.
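Review note: the mock-runtime changes that follow (vesting here, whitelist further down) are the same one-liner: `BlockWeights::simple_max` now takes a `Weight` rather than a `u64`. Purely mechanical for a test runtime, sketched with the value both mocks use:

    frame_support::parameter_types! {
        pub BlockWeights: frame_system::limits::BlockWeights =
            frame_system::limits::BlockWeights::simple_max(
                // previously the bare literal `1024`
                frame_support::weights::Weight::from_ref_time(1024),
            );
    }

The `1024` cap is only meaningful inside these unit tests; real runtimes configure their block weights from their own benchmarked constants.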
diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs
index 9ad8e57500e89..8875404f03fa2 100644
--- a/frame/vesting/src/mock.rs
+++ b/frame/vesting/src/mock.rs
@@ -45,7 +45,7 @@ frame_support::construct_runtime!(
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1024);
+		frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
 	type AccountData = pallet_balances::AccountData<u64>;
diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs
index 4596157e63b7b..bd3af72cdd182 100644
--- a/frame/vesting/src/weights.rs
+++ b/frame/vesting/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions needed for pallet_vesting.
@@ -60,96 +60,96 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Vesting Vesting (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	fn vest_locked(l: u32, s: u32, ) -> Weight {
-		(32_978_000 as Weight)
+		Weight::from_ref_time(32_978_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((82_000 as Weight).saturating_mul(l as Weight))
+			.saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
 			// Standard Error: 2_000
-			.saturating_add((88_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Vesting Vesting (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	fn vest_unlocked(l: u32, s: u32, ) -> Weight {
-		(32_856_000 as Weight)
+		Weight::from_ref_time(32_856_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((79_000 as Weight).saturating_mul(l as Weight))
+			.saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
 			// Standard Error: 2_000
-			.saturating_add((56_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(2 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight))
 	}
 	// Storage: Vesting Vesting (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
 	fn vest_other_locked(l: u32, s: u32, ) -> Weight {
-		(33_522_000 as Weight)
+		Weight::from_ref_time(33_522_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((74_000 as Weight).saturating_mul(l as Weight))
+			.saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
 			// Standard Error: 2_000
-			.saturating_add((72_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(T::DbWeight::get().reads(3 as Weight))
-			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight))
 	}
 	// Storage: Vesting Vesting (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - (32_558_000 as Weight) + Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - (49_260_000 as Weight) + Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - (49_166_000 as Weight) + Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (34_042_000 as Weight) + Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 
w:1) fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (33_937_000 as Weight) + Weight::from_ref_time(33_937_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -158,95 +158,95 @@ impl WeightInfo for () { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_locked(l: u32, s: u32, ) -> Weight { - (32_978_000 as Weight) + Weight::from_ref_time(32_978_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_unlocked(l: u32, s: u32, ) -> Weight { - (32_856_000 as Weight) + Weight::from_ref_time(32_856_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_locked(l: u32, s: u32, ) -> Weight { - (33_522_000 as Weight) + Weight::from_ref_time(33_522_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((74_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn 
vest_other_unlocked(l: u32, s: u32, ) -> Weight { - (32_558_000 as Weight) + Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - (49_260_000 as Weight) + Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - (49_166_000 as Weight) + Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (34_042_000 as Weight) + Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // 
Storage: System Account (r:1 w:1)
	fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight {
-		(33_937_000 as Weight)
+		Weight::from_ref_time(33_937_000 as RefTimeWeight)
 			// Standard Error: 1_000
-			.saturating_add((78_000 as Weight).saturating_mul(l as Weight))
+			.saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight))
 			// Standard Error: 2_000
-			.saturating_add((73_000 as Weight).saturating_mul(s as Weight))
-			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
-			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+			.saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+			.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
 	}
 }
diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs
index 239f0fd280160..5fb4e3aeb9ba6 100644
--- a/frame/whitelist/src/lib.rs
+++ b/frame/whitelist/src/lib.rs
@@ -38,6 +38,7 @@ mod mock;
 #[cfg(test)]
 mod tests;
 pub mod weights;
+pub use weights::WeightInfo;
 
 use codec::{DecodeLimit, Encode, FullCodec};
 use frame_support::{
@@ -54,7 +55,6 @@ pub use pallet::*;
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use crate::weights::WeightInfo;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
 
diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs
index 5b6274925daea..8687e689b81f2 100644
--- a/frame/whitelist/src/mock.rs
+++ b/frame/whitelist/src/mock.rs
@@ -51,7 +51,7 @@ construct_runtime!(
 
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1024);
+		frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = Nothing;
diff --git a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs
index 67bccaeaeebe1..3e20cd29efb4f 100644
--- a/frame/whitelist/src/tests.rs
+++ b/frame/whitelist/src/tests.rs
@@ -19,7 +19,9 @@
 
 use crate::mock::*;
 use codec::Encode;
-use frame_support::{assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider};
+use frame_support::{
+	assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider, weights::Weight,
+};
 use sp_runtime::{traits::Hash, DispatchError};
 
 #[test]
@@ -94,7 +96,11 @@ fn test_whitelist_call_and_execute() {
 		assert!(Preimage::preimage_requested(&call_hash));
 
 		assert_noop!(
-			Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight - 1),
+			Whitelist::dispatch_whitelisted_call(
+				Origin::root(),
+				call_hash,
+				call_weight - Weight::one()
+			),
 			crate::Error::<Test>::InvalidCallWeightWitness,
 		);
 
@@ -114,7 +120,7 @@ fn test_whitelist_call_and_execute_failing_call() {
 	new_test_ext().execute_with(|| {
 		let call = Call::Whitelist(crate::Call::dispatch_whitelisted_call {
 			call_hash: Default::default(),
-			call_weight_witness: 0,
+			call_weight_witness: Weight::zero(),
 		});
 		let call_weight = call.get_dispatch_info().weight;
 		let encoded_call = call.encode();
diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs
index 81482c35e3de8..bb2ed9700c833 100644
--- a/frame/whitelist/src/weights.rs
+++ b/frame/whitelist/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight,
constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_whitelist. @@ -56,35 +56,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - (20_938_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(20_938_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - (22_332_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(22_332_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - (5_989_917_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(5_989_917_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - (25_325_000 as Weight) + Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -93,34 +93,34 @@ impl WeightInfo for () { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - (20_938_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(20_938_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - (22_332_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(22_332_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - (5_989_917_000 as Weight) - 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(5_989_917_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - (25_325_000 as Weight) + Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index f3d091266d5d4..75c935bf844bd 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -20,6 +20,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-version = { version = "5.0.0", default-features = false, path = "../version" } sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } +sp-trie = { version = "6.0.0", optional = true, path = "../trie" } hash-db = { version = "0.15.2", optional = true } thiserror = { version = "1.0.30", optional = true } @@ -36,6 +37,7 @@ std = [ "sp-std/std", "sp-runtime/std", "sp-state-machine", + "sp-trie", "sp-version/std", "hash-db", "thiserror", diff --git a/primitives/api/proc-macro/src/common.rs b/primitives/api/proc-macro/src/common.rs new file mode 100644 index 0000000000000..10887be613278 --- /dev/null +++ b/primitives/api/proc-macro/src/common.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2024 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// The ident used for the block generic parameter. +pub const BLOCK_GENERIC_IDENT: &str = "Block"; + +/// Unique identifier used to make the hidden includes unique for this macro. +pub const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; + +/// The `core_trait` attribute. +pub const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; +/// The `api_version` attribute. +/// +/// Is used to set the current version of the trait. +pub const API_VERSION_ATTRIBUTE: &str = "api_version"; +/// The `changed_in` attribute. +/// +/// Is used when the function signature changed between different versions of a trait. +/// This attribute should be placed on the old signature of the function. +pub const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; +/// The `renamed` attribute. 
+/// +/// Is used when a trait method was renamed. +pub const RENAMED_ATTRIBUTE: &str = "renamed"; +/// All attributes that we support in the declaration of a runtime api trait. +pub const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index b031c0f8bb1cc..aac4491720c34 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -16,11 +16,15 @@ // limitations under the License. use crate::utils::{ - extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, - generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, - generate_runtime_mod_name_for_trait, prefix_function_with_trait, - replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, + generate_hidden_includes, generate_runtime_mod_name_for_trait, parse_runtime_api_version, + prefix_function_with_trait, replace_wild_card_parameter_names, return_type_extract_type, + versioned_trait_name, AllowSelfRefInParameters, +}; + +use crate::common::{ + API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, + HIDDEN_INCLUDES_ID, RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, }; use proc_macro2::{Span, TokenStream}; @@ -33,36 +37,11 @@ use syn::{ parse_macro_input, parse_quote, spanned::Spanned, visit::{self, Visit}, - Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, - TraitBound, TraitItem, TraitItemMethod, Type, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, TraitBound, + TraitItem, TraitItemMethod, }; -use std::collections::HashMap; - -/// The ident used for the block generic parameter. -const BLOCK_GENERIC_IDENT: &str = "Block"; - -/// Unique identifier used to make the hidden includes unique for this macro. -const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; - -/// The `core_trait` attribute. -const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; -/// The `api_version` attribute. -/// -/// Is used to set the current version of the trait. -const API_VERSION_ATTRIBUTE: &str = "api_version"; -/// The `changed_in` attribute. -/// -/// Is used when the function signature changed between different versions of a trait. -/// This attribute should be placed on the old signature of the function. -const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; -/// The `renamed` attribute. -/// -/// Is used when a trait method was renamed. -const RENAMED_ATTRIBUTE: &str = "renamed"; -/// All attributes that we support in the declaration of a runtime api trait. -const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = - &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; +use std::collections::{BTreeMap, HashMap}; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -119,20 +98,6 @@ impl<'ast> Visit<'ast> for IsUsingBlock { } } -/// Visits the ast and checks if `Block` ident is used somewhere. 
-fn type_is_using_block(ty: &Type) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_type(ty); - visitor.result -} - -/// Visits the ast and checks if `Block` ident is used somewhere. -fn return_type_is_using_block(ty: &ReturnType) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_return_type(ty); - visitor.result -} - /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} @@ -146,146 +111,69 @@ impl Fold for ReplaceBlockWithNodeBlock { } } -/// Replace all occurrences of `Block` with `NodeBlock` -fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_fn_arg(&mut replace, fn_arg) -} - -/// Replace all occurrences of `Block` with `NodeBlock` -fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_return_type(&mut replace, return_type) -} - -/// Generate the functions that generate the native call closure for each trait method. -fn generate_native_call_generators(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some(&m.sig), - _ => None, - }); - - let mut result = Vec::new(); - let trait_ = &decl.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Auxiliary function that is used to convert between types that use different block types. - // The function expects that both are convertible by encoding the one and decoding the other. - result.push(quote!( - #[cfg(any(feature = "std", test))] - fn convert_between_block_types - #crate_::ApiError>( - input: &I, - map_error: F, - ) -> std::result::Result - { - ::decode_with_depth_limit( - #crate_::MAX_EXTRINSIC_DEPTH, - &mut &#crate_::Encode::encode(input)[..], - ).map_err(map_error) +/// Versioned API traits are used to catch missing methods when implementing a specific version of a +/// versioned API. They contain all non-versioned methods (aka stable methods) from the main trait +/// and all versioned methods for the specific version. This means that there is one trait for each +/// version mentioned in the trait definition. For example: +/// ```ignore +/// // The trait version implicitly is 1 +/// decl_runtime_apis!( +/// trait SomeApi { +/// fn method1(); // this is a 'stable method' +/// +/// #[api_version(2)] +/// fn method2(); +/// +/// #[api_version(2)] +/// fn method3(); +/// +/// #[api_version(3)] +/// fn method4(); +/// } +/// ); +/// ``` +/// This trait has got three different versions. The function below will generate the following +/// code: +/// ``` +/// trait SomeApiV1 { +/// // in V1 only the stable methods are required. The rest has got default implementations. +/// fn method1(); +/// } +/// +/// trait SomeApiV2 { +/// // V2 contains all methods from V1 and V2. V3 not required so they are skipped. +/// fn method1(); +/// fn method2(); +/// fn method3(); +/// } +/// +/// trait SomeApiV3 { +/// // And V3 contains all methods from the trait. 
+/// fn method1(); +/// fn method2(); +/// fn method3(); +/// fn method4(); +/// } +/// ``` +fn generate_versioned_api_traits( + api: ItemTrait, + methods: BTreeMap>, +) -> Vec { + let mut result = Vec::::new(); + for (version, _) in &methods { + let mut versioned_trait = api.clone(); + versioned_trait.ident = versioned_trait_name(&versioned_trait.ident, *version); + versioned_trait.items = Vec::new(); + // Add the methods from the current version and all previous one. Versions are sorted so + // it's safe to stop early. + for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { + versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); } - )); - - // Generate a native call generator for each function of the given trait. - for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(fn_, AllowSelfRefInParameters::No)?; - let trait_fn_name = &fn_.ident; - let function_name_str = fn_.ident.to_string(); - let fn_name = generate_native_call_generator_fn_name(&fn_.ident); - let output = return_type_replace_block_with_node_block(fn_.output.clone()); - let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); - - // Every type that is using the `Block` generic parameter, we need to encode/decode, - // to make it compatible between the runtime/node. - let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let param_name = quote!(#n).to_string(); - - quote!( - let #n: #t = convert_between_block_types( - &#n, - |e| #crate_::ApiError::FailedToConvertParameter { - function: #function_name_str, - parameter: #param_name, - error: e, - }, - )?; - ) - }); - // Same as for the input types, we need to check if we also need to convert the output, - // before returning it. - let output_conversion = if return_type_is_using_block(&fn_.output) { - quote!( - convert_between_block_types( - &res, - |e| #crate_::ApiError::FailedToConvertReturnValue { - function: #function_name_str, - error: e, - }, - ) - ) - } else { - quote!(Ok(res)) - }; - - let input_names = params.iter().map(|v| &v.0); - // If the type is using the block generic type, we will encode/decode it to make it - // compatible. To ensure that we forward it by ref/value, we use the value given by the - // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = - params.iter().map(|v| if type_is_using_block(&v.1) { v.2 } else { None }); - - // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect - // all the function inputs. - let fn_inputs = fn_ - .inputs - .iter() - .map(|v| fn_arg_replace_block_with_node_block(v.clone())) - .map(|v| match v { - FnArg::Typed(ref arg) => { - let mut arg = arg.clone(); - if let Type::Reference(ref mut r) = *arg.ty { - r.lifetime = Some(parse_quote!( 'a )); - } - FnArg::Typed(arg) - }, - r => r, - }); - - let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); - // We need to parse them again, to get an easy access to the actual parameters. - let impl_generics: Generics = parse_quote!( #impl_generics ); - let impl_generics_params = impl_generics.params.iter().map(|p| { - match p { - GenericParam::Type(ref ty) => { - let mut ty = ty.clone(); - ty.bounds.push(parse_quote!( 'a )); - GenericParam::Type(ty) - }, - // We should not see anything different than type params here. 
- r => r.clone(), - } - }); - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT - #(, #impl_generics_params)* - >( - #( #fn_inputs ),* - ) -> impl FnOnce() -> #output + 'a #where_clause { - move || { - #( #conversions )* - let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); - #output_conversion - } - } - )); + result.push(versioned_trait); } - Ok(quote!( #( #result )* )) + result } /// Try to parse the given `Attribute` as `renamed` attribute. @@ -323,126 +211,13 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { } } -/// Generate the functions that call the api at a given block for a given trait method. -fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), - _ => None, - }); - - let mut result = Vec::new(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate a native call generator for each function of the given trait. - for (attrs, fn_) in fns { - let trait_name = &decl.ident; - let trait_fn_name = prefix_function_with_trait(trait_name, &fn_.ident); - let fn_name = generate_call_api_at_fn_name(&fn_.ident); - - let attrs = remove_supported_attributes(&mut attrs.clone()); - - if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - return Err(Error::new( - fn_.span(), - format!( - "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE - ), - )) - } - - // We do not need to generate this function for a method that signature was changed. - if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue - } - - // Parse the renamed attributes. - let mut renames = Vec::new(); - if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { - let (old_name, version) = parse_renamed_attribute(a)?; - renames.push((version, prefix_function_with_trait(trait_name, &old_name))); - } - - renames.sort_by(|l, r| r.cmp(l)); - let (versions, old_names) = renames.into_iter().fold( - (Vec::new(), Vec::new()), - |(mut versions, mut old_names), (version, old_name)| { - versions.push(version); - old_names.push(old_name); - (versions, old_names) - }, - ); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - #[allow(clippy::too_many_arguments)] - pub fn #fn_name< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, - Block: #crate_::BlockT, - T: #crate_::CallApiAt, - >( - call_runtime_at: &T, - at: &#crate_::BlockId, - args: std::vec::Vec, - changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: &std::cell::RefCell< - #crate_::StorageTransactionCache - >, - native_call: std::option::Option, - context: #crate_::ExecutionContext, - recorder: &std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { - let version = call_runtime_at.runtime_version_at(at)?; - - #( - // Check if we need to call the function by an old name. 
- if version.apis.iter().any(|(s, v)| { - s == &ID && *v < #versions - }) { - let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { - at, - function: #old_names, - native_call: None, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - let ret = #crate_::CallApiAt::::call_api_at(call_runtime_at, params)?; - - return Ok(ret) - } - )* - - let params = #crate_::CallApiAtParams { - at, - function: #trait_fn_name, - native_call, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - #crate_::CallApiAt::::call_api_at(call_runtime_at, params) - } - )); - } - - Ok(quote!( #( #result )* )) -} - /// Generate the declaration of the trait for the runtime. fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut result = Vec::new(); for decl in decls { let mut decl = decl.clone(); + let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); @@ -450,30 +225,73 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); - let call_api_at_calls = generate_call_api_at_calls(&decl)?; - - // Remove methods that have the `changed_in` attribute as they are not required for the - // runtime anymore. - decl.items = decl - .items - .iter_mut() - .filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs) - .contains_key(CHANGED_IN_ATTRIBUTE) - { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - }, - r => Some(r.clone()), - }) - .collect(); + let trait_api_version = get_api_version(&found_attributes)?; + + let mut methods_by_version: BTreeMap> = BTreeMap::new(); + + // Process the items in the declaration. 
The loop below does a lot of work
+		// because the method attributes are stripped at this point.
+		decl.items.iter_mut().for_each(|i| match i {
+			TraitItem::Method(ref mut method) => {
+				let method_attrs = remove_supported_attributes(&mut method.attrs);
+				let mut method_version = trait_api_version;
+				// Validate the api version for the method (if any) and generate a default
+				// implementation for versioned methods.
+				if let Some(version_attribute) = method_attrs.get(API_VERSION_ATTRIBUTE) {
+					method_version = match parse_runtime_api_version(version_attribute) {
+						Ok(method_api_ver) if method_api_ver < trait_api_version => {
+							let method_ver = method_api_ver.to_string();
+							let trait_ver = trait_api_version.to_string();
+							let mut err1 = Error::new(
+								version_attribute.span(),
+								format!(
+									"Method version `{}` is older than (or equal to) trait version `{}`. \
+									Methods can't define versions older than the trait version.",
+									method_ver,
+									trait_ver,
+								),
+							);
+
+							let err2 = match found_attributes.get(&API_VERSION_ATTRIBUTE) {
+								Some(attr) => Error::new(attr.span(), "Trait version is set here."),
+								None => Error::new(
+									decl_span,
+									"Trait version is not set so it is implicitly equal to 1.",
+								),
+							};
+							err1.combine(err2);
+							result.push(err1.to_compile_error());
+
+							trait_api_version
+						},
+						Ok(method_api_ver) => method_api_ver,
+						Err(e) => {
+							result.push(e.to_compile_error());
+							trait_api_version
+						},
+					};
+				}
 
-		let native_call_generators = generate_native_call_generators(&decl)?;
+				// Any method with the `changed_in` attribute isn't required for the runtime
+				// anymore.
+				if !method_attrs.contains_key(CHANGED_IN_ATTRIBUTE) {
+					// Make sure we replace all the wild card parameter names.
+					replace_wild_card_parameter_names(&mut method.sig);
+
+					// Partition the methods by api version.
+					methods_by_version.entry(method_version).or_default().push(method.clone());
+				}
+			},
+			_ => (),
+		});
+
+		let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version);
+
+		let main_api_ident = decl.ident.clone();
+		let versioned_ident = &versioned_api_traits
+			.first()
+			.expect("There should always be at least one version.")
+			.ident;
 
 		result.push(quote!(
 			#[doc(hidden)]
@@ -482,15 +300,13 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result<TokenStream> {
 			pub mod #mod_name {
 				use super::*;
 
-				#decl
+				#( #versioned_api_traits )*
+
+				pub use #versioned_ident as #main_api_ident;
 
 				pub #api_version
 
 				pub #id
-
-				#native_call_generators
-
-				#call_api_at_calls
 			}
 		));
 	}
@@ -509,18 +325,45 @@ struct ToClientSideDecl<'a> {
 }
 
 impl<'a> ToClientSideDecl<'a> {
-	fn fold_item_trait_items(&mut self, items: Vec<TraitItem>) -> Vec<TraitItem> {
+	/// Process the given [`ItemTrait`].
+	fn process(mut self, decl: ItemTrait) -> ItemTrait {
+		let mut decl = self.fold_item_trait(decl);
+
+		let block_id = self.block_id;
+		let crate_ = self.crate_;
+
+		// Add the special method that will be implemented by the `impl_runtime_apis!` macro
+		// to enable functions to call into the runtime.
+		decl.items.push(parse_quote! {
+			/// !!INTERNAL USE ONLY!!
+ #[doc(hidden)] + fn __runtime_api_internal_call_api_at( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError>; + }); + + decl + } +} + +impl<'a> ToClientSideDecl<'a> { + fn fold_item_trait_items( + &mut self, + items: Vec, + trait_generics_num: usize, + ) -> Vec { let mut result = Vec::new(); items.into_iter().for_each(|i| match i { TraitItem::Method(method) => { - let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); + let (fn_decl, fn_decl_ctx) = + self.fold_trait_item_method(method, trait_generics_num); result.push(fn_decl.into()); result.push(fn_decl_ctx.into()); - - if let Some(fn_impl) = fn_impl { - result.push(fn_impl.into()); - } }, r => result.push(r), }); @@ -531,20 +374,24 @@ impl<'a> ToClientSideDecl<'a> { fn fold_trait_item_method( &mut self, method: TraitItemMethod, - ) -> (TraitItemMethod, Option, TraitItemMethod) { + trait_generics_num: usize, + ) -> (TraitItemMethod, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context); - let fn_decl_ctx = self.create_method_decl_with_context(method); + let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); + let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num); - (fn_decl, fn_impl, fn_decl_ctx) + (fn_decl, fn_decl_ctx) } - fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { + fn create_method_decl_with_context( + &mut self, + method: TraitItemMethod, + trait_generics_num: usize, + ) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); @@ -552,52 +399,6 @@ impl<'a> ToClientSideDecl<'a> { fn_decl_ctx } - /// Takes the given method and creates a `method_runtime_api_impl` method that will be - /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl( - &mut self, - mut method: TraitItemMethod, - ) -> Option { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None - } - - let fn_sig = &method.sig; - let ret_type = return_type_extract_type(&fn_sig.output); - - // Get types and if the value is borrowed from all parameters. - // If there is an error, we push it as the block to the user. - let param_types = - match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { - Ok(res) => res - .into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - }, - }; - let name = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); - let block_id = self.block_id; - let crate_ = self.crate_; - - Some(parse_quote! 
{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; - }) - } - /// Takes the method declared by the user and creates the declaration we require for the runtime /// api client side. This method will call by default the `method_runtime_api_impl` for doing /// the actual call into the runtime. @@ -605,6 +406,7 @@ impl<'a> ToClientSideDecl<'a> { &mut self, mut method: TraitItemMethod, context: TokenStream, + trait_generics_num: usize, ) -> TraitItemMethod { let params = match extract_parameter_names_types_and_borrows( &method.sig, @@ -616,18 +418,42 @@ impl<'a> ToClientSideDecl<'a> { Vec::new() }, }; - let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); fold_fn_decl_for_client_side(&mut method.sig, self.block_id, self.crate_); - let name_impl = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); let crate_ = self.crate_; let found_attributes = remove_supported_attributes(&mut method.attrs); + + // Parse the renamed attributes. + let mut renames = Vec::new(); + for (_, a) in found_attributes.iter().filter(|a| a.0 == &RENAMED_ATTRIBUTE) { + match parse_renamed_attribute(a) { + Ok((old_name, version)) => { + renames.push((version, prefix_function_with_trait(&self.trait_, &old_name))); + }, + Err(e) => self.errors.push(e.to_compile_error()), + } + } + + renames.sort_by(|l, r| r.cmp(l)); + let (versions, old_names) = renames.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut versions, mut old_names), (version, old_name)| { + versions.push(version); + old_names.push(old_name); + (versions, old_names) + }, + ); + + // Generate the function name before we may rename it below to + // `function_name_before_version_{}`. + let function_name = prefix_function_with_trait(&self.trait_, &method.sig.ident); + // If the method has a `changed_in` attribute, we need to alter the method name to // `method_before_version_VERSION`. - let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { + match get_changed_in(&found_attributes) { Ok(Some(version)) => { // Make sure that the `changed_in` version is at least the current `api_version`. if get_api_version(self.found_attributes).ok() < Some(version) { @@ -646,47 +472,51 @@ impl<'a> ToClientSideDecl<'a> { ); method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); - - let panic = - format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), + Ok(None) => {}, Err(e) => { self.errors.push(e.to_compile_error()); - (quote!(unimplemented!()), quote!(None)) }, }; - let function_name = method.sig.ident.to_string(); + // The module where the runtime relevant stuff is declared. + let trait_name = &self.trait_; + let runtime_mod = generate_runtime_mod_name_for_trait(trait_name); + let underscores = (0..trait_generics_num).map(|_| quote!(_)); // Generate the default implementation that calls the `method_runtime_api_impl` method. method.default = Some(parse_quote! 
{ { - let runtime_api_impl_params_encoded = + let __runtime_api_impl_params_encoded__ = #crate_::Encode::encode(&( #( &#params ),* )); - self.#name_impl( + >::__runtime_api_internal_call_api_at( + self, __runtime_api_at_param__, #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - std::result::Result::map_err( - <#ret_type as #crate_::Decode>::decode(&mut &r[..]), - |err| #crate_::ApiError::FailedToDecodeReturnValue { - function: #function_name, - error: err, - } - ) - } + __runtime_api_impl_params_encoded__, + &|version| { + #( + // Check if we need to call the function by an old name. + if version.apis.iter().any(|(s, v)| { + s == &#runtime_mod::ID && *v < #versions + }) { + return #old_names + } + )* + + #function_name } ) + .and_then(|r| + std::result::Result::map_err( + <#ret_type as #crate_::Decode>::decode(&mut &r[..]), + |err| #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) + ) } }); @@ -714,37 +544,12 @@ impl<'a> Fold for ToClientSideDecl<'a> { // The client side trait is only required when compiling with the feature `std` or `test`. input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items); + input.items = self.fold_item_trait_items(input.items, input.generics.params.len()); fold::fold_item_trait(self, input) } } -/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. -fn parse_runtime_api_version(version: &Attribute) -> Result { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ), - )); - - match meta { - Meta::List(list) => - if list.nested.len() != 1 { - err - } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { - i.base10_parse() - } else { - err - }, - _ => err, - } -} - /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. 
fn generate_runtime_api_id(trait_name: &str) -> TokenStream { @@ -821,16 +626,14 @@ fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { let mut errors = Vec::new(); let trait_ = decl.ident.clone(); - let decl = { - let mut to_client_side = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - trait_: &trait_, - }; - to_client_side.fold_item_trait(decl) - }; + let decl = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + trait_: &trait_, + } + .process(decl); let api_version = get_api_version(&found_attributes); diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 0ac3cfbe1244e..5ab8fa0521699 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -17,13 +17,13 @@ use crate::utils::{ extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, - generate_hidden_includes, generate_method_runtime_api_impl_name, - generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, - prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, - RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, + versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, }; +use crate::common::API_VERSION_ATTRIBUTE; + use proc_macro2::{Span, TokenStream}; use quote::quote; @@ -33,8 +33,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, - TypePath, + Attribute, Ident, ImplItem, ItemImpl, Path, Signature, Type, TypePath, }; use std::collections::HashSet; @@ -105,8 +104,10 @@ fn generate_impl_calls( let mut impl_calls = Vec::new(); for impl_ in impls { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); + let impl_trait = extend_with_api_version(impl_trait, trait_api_ver); let impl_trait_ident = &impl_trait_path .segments .last() @@ -276,9 +277,13 @@ fn generate_runtime_api_base_structures() -> Result { std::clone::Clone::clone(&self.recorder) } - fn extract_proof(&mut self) -> std::option::Option<#crate_::StorageProof> { - std::option::Option::take(&mut self.recorder) - .map(|recorder| #crate_::ProofRecorder::::to_storage_proof(&recorder)) + fn extract_proof( + &mut self, + ) -> std::option::Option<#crate_::StorageProof> { + let recorder = std::option::Option::take(&mut self.recorder); + std::option::Option::map(recorder, |recorder| { + #crate_::ProofRecorder::::drain_storage_proof(recorder) + }) } fn into_storage_changes( @@ -326,35 +331,6 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] impl> RuntimeApiImpl { - fn call_api_at< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - F: FnOnce( - &C, - &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::StorageTransactionCache>, - 
&std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, E>, - E, - >( - &self, - call_api_at: F, - ) -> std::result::Result<#crate_::NativeOrEncoded, E> { - if *std::cell::RefCell::borrow(&self.commit_on_success) { - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); - } - let res = call_api_at( - &self.call, - &self.changes, - &self.storage_transaction_cache, - &self.recorder, - ); - - self.commit_or_rollback(std::result::Result::is_ok(&res)); - res - } - fn commit_or_rollback(&self, commit: bool) { let proof = "\ We only close a transaction when we opened one ourself. @@ -398,6 +374,24 @@ fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { trait_ } +fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { + let version = if let Some(v) = version { + v + } else { + // nothing to do + return trait_ + }; + + let trait_name = &mut trait_ + .segments + .last_mut() + .expect("Trait path should always contain at least one item; qed") + .ident; + *trait_name = versioned_trait_name(trait_name, version); + + trait_ +} + /// Generates the implementations of the apis for the runtime. fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { let mut impls_prepared = Vec::new(); @@ -405,9 +399,12 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { // We put `runtime` before each trait to get the trait that is intended for the runtime and // we put the `RuntimeBlock` as first argument for the trait generics. for impl_ in impls.iter() { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; + let mut impl_ = impl_.clone(); let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); let trait_ = extend_with_runtime_decl_path(trait_); + let trait_ = extend_with_api_version(trait_, trait_api_ver); impl_.trait_.as_mut().unwrap().1 = trait_; impl_.attrs = filter_cfg_attrs(&impl_.attrs); @@ -424,121 +421,70 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { /// with code that calls into the runtime. struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { runtime_block: &'a TypePath, - runtime_mod_path: &'a Path, - runtime_type: &'a Type, - trait_generic_arguments: &'a [GenericArgument], - impl_trait: &'a Ident, } -impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { - fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = - if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; - - fold::fold_type_path(self, new_ty_path) - } +impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { + /// Process the given item implementation. 
+ fn process(mut self, input: ItemImpl) -> ItemImpl { + let mut input = self.fold_item_impl(input); - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { - let block = { - let runtime_mod_path = self.runtime_mod_path; - let runtime = self.runtime_type; - let native_call_generator_ident = - generate_native_call_generator_fn_name(&input.sig.ident); - let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); - let trait_generic_arguments = self.trait_generic_arguments; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate the access to the native parameters - let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![quote!(p)] - } else { - input - .sig - .inputs - .iter() - .enumerate() - .map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }) - .collect::>() - }; - - let (param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => ( - res.into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - None, - ), - Err(e) => (Vec::new(), Some(e.to_compile_error())), - }; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - // Rewrite the input parameters. - input.sig.inputs = parse_quote! { + // Delete all functions, because all of them are default implemented by + // `decl_runtime_apis!`. We only need to implement the `__runtime_api_internal_call_api_at` + // function. + input.items.clear(); + input.items.push(parse_quote! { + fn __runtime_api_internal_call_api_at( &self, at: &#crate_::BlockId<__SR_API_BLOCK__>, context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - }; - - input.sig.ident = - generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); - let ret_type = return_type_extract_type(&input.sig.output); - - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> - ); - - // Generate the new method implementation that calls into the runtime. - parse_quote!( - { - // Get the error to the user (if we have one). - #error - - self.call_api_at( - | - call_runtime_at, - changes, - storage_transaction_cache, - recorder - | { - #runtime_mod_path #call_api_at_call( - call_runtime_at, - at, - params_encoded, - changes, - storage_transaction_cache, - params.map(|p| { - #runtime_mod_path #native_call_generator_ident :: - <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( - #( #param_tuple_access ),* - ) - }), - context, - recorder, - ) - } - ) + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + if *std::cell::RefCell::borrow(&self.commit_on_success) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); } - ) - }; - let mut input = fold::fold_impl_item_method(self, input); - // We need to set the block, after we modified the rest of the ast, otherwise we would - // modify our generated block as well. 
- input.block = block; + let res = (|| { + let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at(self.call, at)?; + + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { + at, + function: (*fn_name)(version), + native_call: None, + arguments: params, + overlayed_changes: &self.changes, + storage_transaction_cache: &self.storage_transaction_cache, + context, + recorder: &self.recorder, + }; + + #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at::<#crate_::NeverNativeValue, _>( + self.call, + params, + ) + })(); + + self.commit_or_rollback(std::result::Result::is_ok(&res)); + + res.map(#crate_::NativeOrEncoded::into_encoded) + } + }); + input } +} + +impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { + fn fold_type_path(&mut self, input: TypePath) -> TypePath { + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; + + fold::fold_type_path(self, new_ty_path) + } fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { // All this `UnwindSafe` magic below here is required for this rust bug: @@ -594,45 +540,55 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result for impl_ in impls { let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; - let impl_trait = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? - .clone(); let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; - let runtime_type = &impl_.self_ty; let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let trait_generic_arguments = match impl_trait.arguments { - PathArguments::Parenthesized(_) | PathArguments::None => vec![], - PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), - }; - - let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { - runtime_block, - runtime_mod_path: &runtime_mod_path, - runtime_type: &*runtime_type, - trait_generic_arguments: &trait_generic_arguments, - impl_trait: &impl_trait.ident, - }; + let processed_impl = + ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); - result.push(visitor.fold_item_impl(impl_.clone())); + result.push(processed_impl); } Ok(quote!( #( #result )* )) } +fn populate_runtime_api_versions( + result: &mut Vec, + sections: &mut Vec, + attrs: Vec, + id: Path, + version: TokenStream, + crate_access: &TokenStream, +) { + result.push(quote!( + #( #attrs )* + (#id, #version) + )); + + sections.push(quote!( + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[cfg(not(feature = "std"))] + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); + }; + )); +} + /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented /// runtime apis. 
 fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result<TokenStream> {
-	let mut result = Vec::with_capacity(impls.len());
-	let mut sections = Vec::with_capacity(impls.len());
+	let mut result = Vec::<TokenStream>::with_capacity(impls.len());
+	let mut sections = Vec::<TokenStream>::with_capacity(impls.len());
 	let mut processed_traits = HashSet::new();
 
 	let c = generate_crate_access(HIDDEN_INCLUDES_ID);
 
 	for impl_ in impls {
+		let api_ver = extract_api_version(&impl_.attrs, impl_.span())?.map(|a| a as u32);
+
 		let mut path = extend_with_runtime_decl_path(
 			extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?.clone(),
 		);
@@ -655,23 +611,11 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result<TokenStream> {
 		}
 
 		let id: Path = parse_quote!( #path ID );
-		let version: Path = parse_quote!( #path VERSION );
+		let version = quote!( #path VERSION );
 		let attrs = filter_cfg_attrs(&impl_.attrs);
 
-		result.push(quote!(
-			#( #attrs )*
-			(#id, #version)
-		));
-
-		sections.push(quote!(
-			#( #attrs )*
-			const _: () = {
-				// All sections with the same name are going to be merged by concatenation.
-				#[cfg(not(feature = "std"))]
-				#[link_section = "runtime_apis"]
-				static SECTION_CONTENTS: [u8; 12] = #c::serialize_runtime_api_info(#id, #version);
-			};
-		));
+		let api_ver = api_ver.map(|a| quote!( #a )).unwrap_or_else(|| version);
+		populate_runtime_api_versions(&mut result, &mut sections, attrs, id, api_ver, &c)
 	}
 
 	Ok(quote!(
@@ -726,6 +670,33 @@ fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec<Attribute> {
 	attrs.iter().filter(|a| a.path.is_ident("cfg")).cloned().collect()
 }
 
+// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors.
+// Returns:
+// - `Err` if the version is malformed,
+// - `Some(u64)` if the version is set,
+// - `None` if the version is not set (this is valid).
+fn extract_api_version(attrs: &Vec<Attribute>, span: Span) -> Result<Option<u64>> {
+	// First fetch all `API_VERSION_ATTRIBUTE` values (there should be only one).
+	let api_ver = attrs
+		.iter()
+		.filter(|a| a.path.is_ident(API_VERSION_ATTRIBUTE))
+		.collect::<Vec<_>>();
+
+	if api_ver.len() > 1 {
+		return Err(Error::new(
+			span,
+			format!(
+				"Found multiple #[{}] attributes for an API implementation. \
+				Each runtime API can have only one version.",
+				API_VERSION_ATTRIBUTE
+			),
+		))
+	}
+
+	// Parse the runtime version if there exists one.
+ api_ver.first().map(|v| parse_runtime_api_version(v)).transpose() +} + #[cfg(test)] mod tests { use super::*; diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 20a2f76f2c83d..31636b8e2d545 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -21,6 +21,7 @@ use proc_macro::TokenStream; +mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 6098f8d6bd741..e43a302e18923 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -18,8 +18,7 @@ use crate::utils::{ extract_block_type_from_trait_path, extract_impl_trait, extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, - RequireQualifiedTraitPath, + return_type_extract_type, AllowSelfRefInParameters, RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -31,7 +30,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, Ident, ItemImpl, Pat, Type, TypePath, + Attribute, ItemImpl, Pat, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. @@ -40,7 +39,7 @@ const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; /// The `advanced` attribute. /// /// If this attribute is given to a function, the function gets access to the `BlockId` as first -/// parameter and needs to return a `Result` with the appropiate error type. +/// parameter and needs to return a `Result` with the appropriate error type. const ADVANCED_ATTRIBUTE: &str = "advanced"; /// The structure used for parsing the runtime api implementations. 
@@ -105,7 +104,9 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result Option<#crate_::StorageProof> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::StorageProof> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } @@ -126,34 +127,63 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result for #self_ty { - fn Core_version_runtime_api_impl( + fn __runtime_api_internal_call_api_at( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: std::vec::Vec, + _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + unimplemented!("`__runtime_api_internal_call_api_at` not implemented for runtime api mocks") } - fn Core_execute_block_runtime_api_impl( + fn version( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn version_with_context( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn execute_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") + } + + fn execute_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<#block_type>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") } - fn Core_initialize_block_runtime_api_impl( + fn initialize_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") + } + + fn initialize_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<&<#block_type as #crate_::BlockT>::Header>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } )) @@ -216,14 +246,53 @@ fn get_at_param_name( } } -/// Auxialiry structure to fold a runtime api trait implementation into the expected format. +/// Auxiliary structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. struct FoldRuntimeApiImpl<'a> { /// The block type that is being used. block_type: &'a TypePath, - /// The identifier of the trait being implemented. - impl_trait: &'a Ident, +} + +impl<'a> FoldRuntimeApiImpl<'a> { + /// Process the given [`syn::ItemImpl`]. 
+	fn process(mut self, impl_item: syn::ItemImpl) -> syn::ItemImpl {
+		let mut impl_item = self.fold_item_impl(impl_item);
+
+		let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
+
+		// We also need to overwrite all the `_with_context` methods. To do this,
+		// we clone all methods and add them again with the new name plus one more argument.
+		impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| {
+			if let syn::ImplItem::Method(mut m) = i {
+				m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident);
+				m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext ));
+
+				Some(m.into())
+			} else {
+				None
+			}
+		}));
+
+		let block_type = self.block_type;
+
+		impl_item.items.push(parse_quote! {
+			fn __runtime_api_internal_call_api_at(
+				&self,
+				_: &#crate_::BlockId<#block_type>,
+				_: #crate_::ExecutionContext,
+				_: std::vec::Vec<u8>,
+				_: &dyn Fn(#crate_::RuntimeVersion) -> &'static str,
+			) -> std::result::Result<std::vec::Vec<u8>, #crate_::ApiError> {
+				unimplemented!(
+					"`__runtime_api_internal_call_api_at` not implemented for runtime api mocks. \
+					Calling deprecated methods is not supported by mocked runtime api."
+				)
+			}
+		});
+
+		impl_item
+	}
 }
 
 impl<'a> Fold for FoldRuntimeApiImpl<'a> {
@@ -277,14 +346,9 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> {
 			input.sig.inputs = parse_quote! {
 				&self,
 				#at_param_name: #block_id_type,
-				_: #crate_::ExecutionContext,
-				___params___sp___api___: Option<( #( #param_types ),* )>,
-				_: Vec<u8>,
+				#( #param_names: #param_types ),*
 			};
 
-			input.sig.ident =
-				generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident);
-
 			// When using advanced, the user needs to declare the correct return type on its own,
 			// otherwise do it for the user.
 			if !is_advanced {
@@ -292,7 +356,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> {
 
 				// Generate the correct return type.
 				input.sig.output = parse_quote!(
-					-> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>
+					-> std::result::Result<#ret_type, #crate_::ApiError>
 				);
 			}
 
@@ -304,7 +368,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> {
 				quote! {
 					let __fn_implementation__ = move || #orig_block;
 
-					Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__()))
+					Ok(__fn_implementation__())
 				}
 			};
 
@@ -314,9 +378,6 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> {
 					// Get the error to the user (if we have one).
 					#( #errors )*
 
-					let (#( #param_names ),*) = ___params___sp___api___
-						.expect("Mocked runtime apis don't support calling deprecated api versions");
-
 					#construct_return_value
 				}
 			)
@@ -351,11 +412,6 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result<GeneratedRuntimeApiImpls> {
 			None => Some(block_type.clone()),
 		};
 
-		let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident };
-
-		result.push(visitor.fold_item_impl(impl_.clone()));
+		result.push(FoldRuntimeApiImpl { block_type }.process(impl_.clone()));
 	}
 
 	Ok(GeneratedRuntimeApiImpls {
diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs
index 97b456b62dfa6..2ccd050cfb151 100644
--- a/primitives/api/proc-macro/src/utils.rs
+++ b/primitives/api/proc-macro/src/utils.rs
@@ -18,16 +18,18 @@
 use proc_macro2::{Span, TokenStream};
 
 use syn::{
-	parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem,
-	ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath,
+	parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident,
+	ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath,
 };
 
-use quote::quote;
+use quote::{format_ident, quote};
 
 use std::env;
 
 use proc_macro_crate::{crate_name, FoundCrate};
 
+use crate::common::API_VERSION_ATTRIBUTE;
+
 fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident {
 	Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site())
 }
@@ -68,11 +70,6 @@ pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident {
 	Ident::new(&format!("runtime_decl_for_{}", trait_), Span::call_site())
 }
 
-/// Generates a name for a method that needs to be implemented in the runtime for the client side.
-pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> Ident {
-	Ident::new(&format!("{}_{}_runtime_api_impl", trait_, method), Span::call_site())
-}
-
 /// Get the type of a `syn::ReturnType`.
 pub fn return_type_extract_type(rt: &ReturnType) -> Type {
 	match rt {
@@ -166,16 +163,6 @@ pub fn extract_parameter_names_types_and_borrows(
 	Ok(result)
 }
 
-/// Generates the name for the native call generator function.
-pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident {
-	Ident::new(&format!("{}_native_call_generator", fn_name), Span::call_site())
-}
-
-/// Generates the name for the call api at function.
-pub fn generate_call_api_at_fn_name(fn_name: &Ident) -> Ident {
-	Ident::new(&format!("{}_call_api_at", fn_name), Span::call_site())
-}
-
 /// Prefix the given function with the trait name.
 pub fn prefix_function_with_trait<F: ToString>(trait_: &Ident, function: &F) -> String {
 	format!("{}_{}", trait_, function.to_string())
@@ -267,3 +254,23 @@ pub fn extract_impl_trait(impl_: &ItemImpl, require: RequireQualifiedTraitPath)
 		}
 	})
 }
+
+/// Parse the given attribute as `API_VERSION_ATTRIBUTE`.
+pub fn parse_runtime_api_version(version: &Attribute) -> Result<u64> {
+	let version = version.parse_args::<syn::LitInt>().map_err(|_| {
+		Error::new(
+			version.span(),
+			&format!(
+				"Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`",
+				api_version = API_VERSION_ATTRIBUTE
+			),
+		)
+	})?;
+
+	version.base10_parse()
+}
+
+// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g.
ParachainHostV2 +pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { + format_ident!("{}V{}", trait_ident, version) +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 2635c81948ff3..353ebd91cf5df 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -78,12 +78,12 @@ pub use hash_db::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; +#[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::{NativeOrEncoded, NeverNativeValue}; #[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] @@ -99,7 +99,8 @@ pub use sp_runtime::{ #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, + backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, + StorageProof, TrieBackend, TrieBackendBuilder, }; #[cfg(feature = "std")] use sp_std::result; @@ -187,6 +188,56 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the /// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` /// to check if the runtime at the given block id implements the requested runtime api trait. +/// +/// # Declaring multiple api versions +/// +/// Optionally multiple versions of the same api can be declared. This is useful for +/// development purposes. For example you want to have a testing version of the api which is +/// available only on a testnet. You can define one stable and one development version. This +/// can be done like this: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// The example above defines two api versions - 2 and 3. Version 2 contains `get_balance` and +/// `set_balance`. Version 3 additionally contains `transfer_balance`, which is not available +/// in version 2. Version 2 in this case is considered the default/base version of the api. +/// More than two versions can be defined this way. For example: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// /// Clears the balance +/// #[api_version(4)] +/// fn clear_balance(); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// Note that the latest version (4 in our example above) always contains all methods from all +/// the versions before. pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. 
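To make the versioning scheme concrete: `versioned_trait_name` above fixes the `ApiNameVN` naming, and the documentation just added states that a later version carries every method of the versions before it. The `Balance` declaration from the first example therefore corresponds, roughly, to one trait per declared version along these lines (an illustrative sketch only; the actual expansion lives inside a generated `runtime_decl_for_Balance` module and differs in detail):

```rust
// Illustrative only: the trait-per-version layout implied by
// `versioned_trait_name` for the `Balance` example above.
pub trait BalanceV2 {
    fn get_balance() -> u64;
    fn set_balance(val: u64);
}

// Version 3 repeats all version-2 methods and adds its own.
pub trait BalanceV3 {
    fn get_balance() -> u64;
    fn set_balance(val: u64);
    fn transfer_balance(uid: u64);
}

fn main() {}
```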
@@ -276,6 +327,22 @@ pub use sp_api_proc_macro::decl_runtime_apis;
 ///
 /// # fn main() {}
 /// ```
+///
+/// # Implementing specific api version
+///
+/// If `decl_runtime_apis!` declares multiple versions for an api, `impl_runtime_apis!`
+/// should specify which version it implements by adding an `api_version` attribute to the
+/// `impl` block. If omitted, the base/default version is implemented. Here is an example:
+/// ```ignore
+/// sp_api::impl_runtime_apis! {
+///     #[api_version(3)]
+///     impl self::Balance<Block> for Runtime {
+///         // implementation
+///     }
+/// }
+/// ```
+/// In this case `Balance` api version 3 is being implemented for `Runtime`. The `impl` block
+/// must contain all methods declared in version 3 and below.
 pub use sp_api_proc_macro::impl_runtime_apis;
 
 /// Mocks given trait implementations as runtime apis.
@@ -341,15 +408,13 @@ pub use sp_api_proc_macro::impl_runtime_apis;
 /// using the `advanced` attribute, the macro expects that the first parameter of the function
 /// is this `at` parameter. Besides that the macro also doesn't do the automatic return value
 /// rewrite, which means that full return value must be specified. The full return value is
-/// constructed like [`Result`]`<`[`NativeOrEncoded`](sp_api::NativeOrEncoded)`<ReturnValue>,
-/// Error>` while `ReturnValue` being the return value that is specified in the trait
-/// declaration.
+/// constructed like [`Result`]`<ReturnValue, Error>`, with `ReturnValue` being the return
+/// value that is specified in the trait declaration.
 ///
 /// ## Example
 /// ```rust
 /// # use sp_runtime::{traits::Block as BlockT, generic::BlockId};
 /// # use sp_test_primitives::Block;
-/// # use sp_core::NativeOrEncoded;
 /// # use codec;
 /// #
 /// # sp_api::decl_runtime_apis! {
@@ -368,13 +433,13 @@ pub use sp_api_proc_macro::impl_runtime_apis;
 /// sp_api::mock_impl_runtime_apis! {
 ///     impl Balance<Block> for MockApi {
 ///         #[advanced]
-///         fn get_balance(&self, at: &BlockId<Block>) -> Result<NativeOrEncoded<u64>, sp_api::ApiError> {
+///         fn get_balance(&self, at: &BlockId<Block>) -> Result<u64, sp_api::ApiError> {
 ///             println!("Being called at: {}", at);
 ///
 ///             Ok(self.balance.into())
 ///         }
 ///         #[advanced]
-///         fn set_balance(at: &BlockId<Block>, val: u64) -> Result<NativeOrEncoded<()>, sp_api::ApiError> {
+///         fn set_balance(at: &BlockId<Block>, val: u64) -> Result<(), sp_api::ApiError> {
 ///             if let BlockId::Number(1) = at {
 ///                 println!("Being called to set balance to: {}", val);
 ///             }
@@ -390,7 +455,7 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis;
 
 /// A type that records all accessed trie nodes and generates a proof out of it.
 #[cfg(feature = "std")]
-pub type ProofRecorder<B> = sp_state_machine::ProofRecorder<<B as BlockT>::Hash>;
+pub type ProofRecorder<B> = sp_trie::recorder::Recorder<HashFor<B>>;
 
 /// A type that is used as cache for the storage transactions.
 #[cfg(feature = "std")]
@@ -454,6 +519,8 @@ pub enum ApiError {
 		#[source]
 		error: codec::Error,
 	},
+	#[error("The given `StateBackend` isn't a `TrieBackend`.")]
+	StateBackendIsNotTrie,
 	#[error(transparent)]
 	Application(#[from] Box<dyn std::error::Error + Send + Sync>),
 }
@@ -549,7 +616,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend<HashFor<Block>>> {
 pub trait CallApiAt<Block: BlockT> {
 	/// The state backend that is used to store the block states.
-	type StateBackend: StateBackend<HashFor<Block>>;
+	type StateBackend: StateBackend<HashFor<Block>> + AsTrieBackend<HashFor<Block>>;
 
 	/// Calls the given api function with the given encoded arguments at the given block and returns
 	/// the encoded result.
@@ -563,6 +630,9 @@ pub trait CallApiAt<Block: BlockT> {
 
 	/// Returns the runtime version at the given block.
 	fn runtime_version_at(&self, at: &BlockId<Block>) -> Result<RuntimeVersion, ApiError>;
+
+	/// Get the state `at` the given block.
+	fn state_at(&self, at: &BlockId<Block>) -> Result<Self::StateBackend, ApiError>;
 }
 
 /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime.
diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs
index 1db416a1d3db6..42628830cc7fa 100644
--- a/primitives/api/test/tests/decl_and_impl.rs
+++ b/primitives/api/test/tests/decl_and_impl.rs
@@ -18,7 +18,6 @@
 use sp_api::{
 	decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo,
 };
-use sp_core::NativeOrEncoded;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, GetNodeBlockType},
@@ -47,6 +46,15 @@ decl_runtime_apis! {
 		#[changed_in(2)]
 		fn same_name() -> String;
 	}
+
+	#[api_version(2)]
+	pub trait ApiWithMultipleVersions {
+		fn stable_one(data: u64);
+		#[api_version(3)]
+		fn new_one();
+		#[api_version(4)]
+		fn glory_one();
+	}
 }
 
 impl_runtime_apis! {
@@ -72,6 +80,13 @@ impl_runtime_apis! {
 		fn same_name() {}
 	}
 
+	#[api_version(3)]
+	impl self::ApiWithMultipleVersions<Block> for Runtime {
+		fn stable_one(_: u64) {}
+
+		fn new_one() {}
+	}
+
 	impl sp_api::Core<Block> for Runtime {
 		fn version() -> sp_version::RuntimeVersion {
 			unimplemented!()
@@ -104,22 +119,12 @@ mock_impl_runtime_apis! {
 	}
 
 	#[advanced]
-	fn same_name(_: &BlockId<Block>) ->
-		Result<
-			NativeOrEncoded<()>,
-			ApiError
-		>
-	{
+	fn same_name(_: &BlockId<Block>) -> Result<(), ApiError> {
 		Ok(().into())
 	}
 
 	#[advanced]
-	fn wild_card(at: &BlockId<Block>, _: u32) ->
-		Result<
-			NativeOrEncoded<()>,
-			ApiError
-		>
-	{
+	fn wild_card(at: &BlockId<Block>, _: u32) -> Result<(), ApiError> {
 		if let BlockId::Number(1337) = at {
 			// yeah
 			Ok(().into())
@@ -176,6 +181,9 @@ fn check_runtime_api_info() {
 		&runtime_decl_for_ApiWithCustomVersion::ID,
 	);
 	assert_eq!(<dyn ApiWithCustomVersion<Block>>::VERSION, 2);
+
+	// The stable version of the API
+	assert_eq!(<dyn ApiWithMultipleVersions<Block>>::VERSION, 2);
 }
 
 fn check_runtime_api_versions_contains<A: RuntimeApiInfo + ?Sized>() {
@@ -186,6 +194,9 @@ fn check_runtime_api_versions() {
 	check_runtime_api_versions_contains::<dyn Api<Block>>();
 	check_runtime_api_versions_contains::<dyn ApiWithCustomVersion<Block>>();
+	assert!(RUNTIME_API_VERSIONS
+		.iter()
+		.any(|v| v == &(<dyn ApiWithMultipleVersions<Block>>::ID, 3)));
 	check_runtime_api_versions_contains::<dyn sp_api::Core<Block>>();
 }
 
@@ -198,7 +209,7 @@ fn mock_runtime_api_has_api() {
 }
 
 #[test]
-#[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")]
+#[should_panic(expected = "Calling deprecated methods is not supported by mocked runtime api.")]
 fn mock_runtime_api_panics_on_calling_old_version() {
 	let mock = MockApi { block: None };
 
diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs
index ba42b342377c7..2ac88c7e6c04f 100644
--- a/primitives/api/test/tests/runtime_calls.rs
+++ b/primitives/api/test/tests/runtime_calls.rs
@@ -25,7 +25,7 @@ use sp_state_machine::{
 };
 use substrate_test_runtime_client::{
 	prelude::*,
-	runtime::{Block, DecodeFails, Header, TestAPI, Transfer},
+	runtime::{Block, Header, TestAPI, Transfer},
 	DefaultTestClientBuilderExt, TestClientBuilder,
 };
 
@@ -51,28 +51,6 @@ fn calling_wasm_runtime_function() {
 	calling_function_with_strat(ExecutionStrategy::AlwaysWasm);
 }
 
-#[test]
-#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")]
-fn calling_native_runtime_function_with_non_decodable_parameter() {
-	let client = TestClientBuilder::new()
-		.set_execution_strategy(ExecutionStrategy::NativeWhenPossible)
-		.build();
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.chain_info().best_number);
-	runtime_api.fail_convert_parameter(&block_id,
DecodeFails::default()).unwrap(); -} - -#[test] -#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] -fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_return_value(&block_id).unwrap(); -} - #[test] fn calling_native_runtime_signature_changed_function() { let client = TestClientBuilder::new() @@ -170,10 +148,8 @@ fn record_proof_works() { .build_with_longest_chain(); let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = futures::executor::block_on(longest_chain.best_chain()) - .unwrap() - .state_root() - .clone(); + let storage_root = + *futures::executor::block_on(longest_chain.best_chain()).unwrap().state_root(); let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode( diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index f3d6aa59a0336..13af1ded7dc6b 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -30,4 +30,5 @@ fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); + t.pass("tests/ui/positive_cases/*.rs"); } diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index b1478e2f53344..2c47c2f480add 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found struct `std::string::String` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... | -32 | | } -33 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/impl_incorrect_method_signature.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... 
| -32 | | } -33 | | } - | |_^ expected `u64`, found struct `std::string::String` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 | diff --git a/primitives/api/test/tests/ui/impl_missing_version.rs b/primitives/api/test/tests/ui/impl_missing_version.rs new file mode 100644 index 0000000000000..63e0599622ac9 --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.rs @@ -0,0 +1,40 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test3() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/impl_missing_version.stderr b/primitives/api/test/tests/ui/impl_missing_version.stderr new file mode 100644 index 0000000000000..c0abeffe0cccf --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.stderr @@ -0,0 +1,14 @@ +error[E0433]: failed to resolve: could not find `ApiV4` in `runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +21 | impl self::Api for Runtime { + | ^^^ could not find `ApiV4` in `runtime_decl_for_Api` + +error[E0405]: cannot find trait `ApiV4` in module `self::runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +11 | pub trait Api { + | ------------- similarly named trait `ApiV2` defined here +... +21 | impl self::Api for Runtime { + | ^^^ help: a trait with a similar name exists: `ApiV2` diff --git a/primitives/api/test/tests/ui/invalid_api_version.rs b/primitives/api/test/tests/ui/invalid_api_version_1.rs similarity index 100% rename from primitives/api/test/tests/ui/invalid_api_version.rs rename to primitives/api/test/tests/ui/invalid_api_version_1.rs diff --git a/primitives/api/test/tests/ui/invalid_api_version.stderr b/primitives/api/test/tests/ui/invalid_api_version_1.stderr similarity index 65% rename from primitives/api/test/tests/ui/invalid_api_version.stderr rename to primitives/api/test/tests/ui/invalid_api_version_1.stderr index 7770bc70e72d6..53ffce959bb66 100644 --- a/primitives/api/test/tests/ui/invalid_api_version.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_1.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version.rs:2:4 + --> tests/ui/invalid_api_version_1.rs:2:2 | 2 | #[api_version] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_2.stderr b/primitives/api/test/tests/ui/invalid_api_version_2.stderr index 7ca6a7eebe49c..0c5274d4680ff 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_2.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_2.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. 
The supported format is `api_version(1)` - --> $DIR/invalid_api_version_2.rs:2:4 + --> tests/ui/invalid_api_version_2.rs:2:2 | 2 | #[api_version("1")] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_3.stderr b/primitives/api/test/tests/ui/invalid_api_version_3.stderr index cef4763a6de96..4a34a7aa9b47a 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_3.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_3.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version_3.rs:2:4 + --> tests/ui/invalid_api_version_3.rs:2:2 | 2 | #[api_version()] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.rs b/primitives/api/test/tests/ui/invalid_api_version_4.rs new file mode 100644 index 0000000000000..37b5b6ffa25d1 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.rs @@ -0,0 +1,8 @@ +sp_api::decl_runtime_apis! { + pub trait Api { + #[api_version("1")] + fn test(data: u64); + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.stderr b/primitives/api/test/tests/ui/invalid_api_version_4.stderr new file mode 100644 index 0000000000000..57541a97f6c91 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.stderr @@ -0,0 +1,5 @@ +error: Unexpected `api_version` attribute. The supported format is `api_version(1)` + --> tests/ui/invalid_api_version_4.rs:3:3 + | +3 | #[api_version("1")] + | ^ diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs new file mode 100644 index 0000000000000..b4f43cd401bba --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs @@ -0,0 +1,9 @@ +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + #[api_version(1)] + fn test(data: u64); + } +} + +fn main() {} \ No newline at end of file diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr new file mode 100644 index 0000000000000..ec4b594023a05 --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr @@ -0,0 +1,11 @@ +error: Method version `1` is older than (or equal to) trait version `2`.Methods can't define versions older than the trait version. + --> tests/ui/method_ver_lower_than_trait_ver.rs:4:3 + | +4 | #[api_version(1)] + | ^ + +error: Trait version is set here. + --> tests/ui/method_ver_lower_than_trait_ver.rs:2:2 + | +2 | #[api_version(2)] + | ^ diff --git a/primitives/api/test/tests/ui/missing_versioned_method.rs b/primitives/api/test/tests/ui/missing_versioned_method.rs new file mode 100644 index 0000000000000..d973a94c2101d --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -0,0 +1,39 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! 
{ + #[api_version(3)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method.stderr b/primitives/api/test/tests/ui/missing_versioned_method.stderr new file mode 100644 index 0000000000000..e3ace7979c27e --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method.rs:21:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... +21 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs new file mode 100644 index 0000000000000..72358b99164d5 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -0,0 +1,42 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test4() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr new file mode 100644 index 0000000000000..7354fbd537fd7 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method_multiple_vers.rs:23:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... +23 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index fd654ffdc63d6..aeef40f4c77d6 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! 
{ impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result, ApiError> { + fn test(&self, _: BlockId) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index befe67c1d0b4a..3b3c4e94c3121 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,10 +1,10 @@ error: `BlockId` needs to be taken by reference and not by value! - --> $DIR/mock_advanced_block_id_by_value.rs:12:1 + --> tests/ui/mock_advanced_block_id_by_value.rs:12:1 | 12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | #[advanced] -15 | | fn test(&self, _: BlockId) -> Result, ApiError> { +15 | | fn test(&self, _: BlockId) -> Result<(), ApiError> { ... | 18 | | } 19 | | } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index a15ef133fa6c4..76bf5f1aa7459 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self) -> Result, ApiError> { + fn test(&self) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index 87d3660316b1e..b9ce7324b5caa 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> $DIR/mock_advanced_missing_blockid.rs:15:3 + --> tests/ui/mock_advanced_missing_blockid.rs:15:3 | -15 | fn test(&self) -> Result, ApiError> { +15 | fn test(&self) -> Result<(), ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index c67de70b9c140..430f63eee1660 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -10,62 +10,80 @@ error: Only `&self` is supported! 16 | fn test2(&mut self, data: u64) {} | ^ -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test` has 2 parameters but the declaration in trait `Api::test` has 3 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! 
{ 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 3 parameters, found 2 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api::test2` has 3 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 3 parameters, found 2 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 4 parameters, found 3 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! 
{ +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 4 parameters, found 3 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/primitives/api/test/tests/ui/positive_cases/default_impls.rs new file mode 100644 index 0000000000000..3434db1089f05 --- /dev/null +++ b/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -0,0 +1,41 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(2)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index dbc0f6def3aa5..479e1cf05a9d1 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `&u64` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... | -34 | | } -35 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... 
| -34 | | } -35 | | } - | |_^ expected `u64`, found `&u64` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 | diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 714d0a2610312..3a8cb3f37cbd3 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index ed392139de13f..ddf58474e77a0 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -293,6 +293,12 @@ pub enum VersionedFinalityProof { V1(SignedCommitment), } +impl From> for VersionedFinalityProof { + fn from(commitment: SignedCommitment) -> Self { + VersionedFinalityProof::V1(commitment) + } +} + #[cfg(test)] mod tests { diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index a5137606e16a2..4d4718b4038d4 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = futures = "0.3.21" log = "0.4.17" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0.30" sp-api = { version = "4.0.0-dev", path = "../api" } sp-consensus = { version = "0.10.0-dev", path = "../consensus/common" } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index dbb67a27c5144..30f5c89650a78 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 736a78ab67b1a..049e511175867 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } merlin = { version = "2.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 973cb3e410e0d..d160cd118998c 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -14,7 +14,7 @@ readme = 
"README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.42" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs deleted file mode 100644 index d1ce8e9fc5109..0000000000000 --- a/primitives/consensus/common/src/evaluation.rs +++ /dev/null @@ -1,74 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Block evaluation and evaluation errors. - -use codec::Encode; -use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One}; - -// This is just a best effort to encode the number. None indicated that it's too big to encode -// in a u128. -type BlockNumber = Option; - -/// Result type alias. -pub type Result = std::result::Result; - -/// Error type. -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Failed to verify block encoding/decoding")] - BadBlockCodec, - /// Proposal provided not a block. - #[error("Proposal provided not a block: decoding error: {0}")] - BadProposalFormat(#[from] codec::Error), - /// Proposal had wrong parent hash. - #[error("Proposal had wrong parent hash. Expected {expected:?}, got {got:?}")] - WrongParentHash { expected: String, got: String }, - /// Proposal had wrong number. - #[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")] - WrongNumber { expected: BlockNumber, got: BlockNumber }, -} - -/// Attempt to evaluate a substrate block as a node block, returning error -/// upon any initial validity checks failing. -pub fn evaluate_initial( - proposal: &Block, - parent_hash: &::Hash, - parent_number: <::Header as HeaderT>::Number, -) -> Result<()> { - let encoded = Encode::encode(proposal); - let block = Block::decode(&mut &encoded[..]).map_err(Error::BadProposalFormat)?; - if &block != proposal { - return Err(Error::BadBlockCodec) - } - - if *parent_hash != *block.header().parent_hash() { - return Err(Error::WrongParentHash { - expected: format!("{:?}", *parent_hash), - got: format!("{:?}", block.header().parent_hash()), - }) - } - - if parent_number + One::one() != *block.header().number() { - return Err(Error::WrongNumber { - expected: parent_number.checked_into::().map(|x| x + 1), - got: (*block.header().number()).checked_into::(), - }) - } - - Ok(()) -} diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 2743f434c209b..043533cbf2258 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -33,7 +33,6 @@ use sp_state_machine::StorageProof; pub mod block_validation; pub mod error; -pub mod evaluation; mod select_chain; pub use self::error::Error; @@ -237,10 +236,10 @@ pub trait Proposer { pub trait SyncOracle { /// Whether the synchronization service is undergoing major sync. 
/// Returns true if so. - fn is_major_syncing(&mut self) -> bool; + fn is_major_syncing(&self) -> bool; /// Whether the synchronization service is offline. /// Returns true if so. - fn is_offline(&mut self) -> bool; + fn is_offline(&self) -> bool; } /// A synchronization oracle for when there is no network. @@ -248,10 +247,10 @@ pub trait SyncOracle { pub struct NoNetwork; impl SyncOracle for NoNetwork { - fn is_major_syncing(&mut self) -> bool { + fn is_major_syncing(&self) -> bool { false } - fn is_offline(&mut self) -> bool { + fn is_offline(&self) -> bool { false } } @@ -259,14 +258,14 @@ impl SyncOracle for NoNetwork { impl SyncOracle for Arc where T: ?Sized, - for<'r> &'r T: SyncOracle, + T: SyncOracle, { - fn is_major_syncing(&mut self) -> bool { - <&T>::is_major_syncing(&mut &**self) + fn is_major_syncing(&self) -> bool { + T::is_major_syncing(self) } - fn is_offline(&mut self) -> bool { - <&T>::is_offline(&mut &**self) + fn is_offline(&self) -> bool { + T::is_offline(self) } } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index cdc12c677e4f3..d591657ee4bd9 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -35,7 +35,7 @@ num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } @@ -47,7 +47,7 @@ thiserror = { version = "1.0.30", optional = true } bitflags = "1.3" # full crypto -ed25519-dalek = { version = "1.0.1", default-features = false, features = ["u64_backend", "alloc"], optional = true } +ed25519-zebra = { version = "3.0.0", default-features = false, optional = true} blake2-rfc = { version = "0.2.18", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", @@ -56,7 +56,7 @@ schnorrkel = { version = "0.9.1", features = [ hex = { version = "0.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -secp256k1 = { version = "0.21.2", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } ss58-registry = { version = "1.18.0", default-features = false } sp-core-hashing = { version = "4.0.0", path = "./hashing", default-features = false, optional = true } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } @@ -97,7 +97,7 @@ std = [ "sp-std/std", "serde", "blake2-rfc/std", - "ed25519-dalek/std", + "ed25519-zebra", "hex/std", "base58", "substrate-bip39", @@ -127,7 +127,7 @@ std = [ # or Intel SGX. # For the regular wasm runtime builds this should not be used. 
full_crypto = [ - "ed25519-dalek", + "ed25519-zebra", "blake2-rfc", "schnorrkel", "hex", diff --git a/primitives/core/src/defer.rs b/primitives/core/src/defer.rs new file mode 100644 index 0000000000000..d14b26d59e4dd --- /dev/null +++ b/primitives/core/src/defer.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the [`crate::defer!`] macro for *deferring* the execution +//! of code until the current scope is dropped. +//! This helps with *always* executing cleanup code. + +/// Executes the wrapped closure on drop. +/// +/// Should be used together with the [`crate::defer!`] macro. +#[must_use] +pub struct DeferGuard(pub Option); + +impl Drop for DeferGuard { + fn drop(&mut self) { + self.0.take().map(|f| f()); + } +} + +/// Executes the given code when the current scope is dropped. +/// +/// Multiple calls to [`crate::defer!`] will execute the passed codes in reverse order. +/// This also applies to panic stack unwinding. +/// +/// # Example +/// +/// ```rust +/// use sp_core::defer; +/// +/// let message = std::cell::RefCell::new("".to_string()); +/// { +/// defer!( +/// message.borrow_mut().push_str("world!"); +/// ); +/// defer!( +/// message.borrow_mut().push_str("Hello "); +/// ); +/// } +/// assert_eq!(*message.borrow(), "Hello world!"); +/// ``` +#[macro_export] +macro_rules! defer( + ( $( $code:tt )* ) => { + let _guard = $crate::defer::DeferGuard(Some(|| { $( $code )* })); + }; +); + +#[cfg(test)] +mod test { + #[test] + fn defer_guard_works() { + let mut called = false; + { + defer!( + called = true; + ); + } + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code in reverse order of being called. + fn defer_guard_order_works() { + let called = std::cell::RefCell::new(1); + + defer!( + assert_eq!(*called.borrow(), 3); + ); + defer!( + assert_eq!(*called.borrow(), 2); + *called.borrow_mut() = 3; + ); + defer!({ + assert_eq!(*called.borrow(), 1); + *called.borrow_mut() = 2; + }); + } + + #[test] + #[allow(unused_braces)] + #[allow(clippy::unnecessary_operation)] + fn defer_guard_syntax_works() { + let called = std::cell::RefCell::new(0); + { + defer!(*called.borrow_mut() += 1); + defer!(*called.borrow_mut() += 1;); // With ; + defer!({ *called.borrow_mut() += 1 }); + defer!({ *called.borrow_mut() += 1 };); // With ; + } + assert_eq!(*called.borrow(), 4); + } + + #[test] + /// `defer` executes the code even in case of a panic. 
+ fn defer_guard_panic_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let mut called = false; + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(called = true); + panic!(); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code even in case another `defer` panics. + fn defer_guard_defer_panics_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let counter = std::cell::RefCell::new(0); + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(*counter.borrow_mut() += 1); + defer!( + *counter.borrow_mut() += 1; + panic!(); + ); + defer!(*counter.borrow_mut() += 1); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert_eq!(*counter.borrow(), 3, "DeferGuard should have executed the closure"); + } +} diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 485e39d3a71db..d56f65fd289e7 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -499,7 +499,7 @@ impl TraitPair for Pair { impl Pair { /// Get the seed for this key. pub fn seed(&self) -> Seed { - self.secret.serialize_secret() + self.secret.secret_bytes() } /// Exactly as `from_string` except that if no matches are found then, the the first 32 diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 177af0651c0ef..0553cf4843df5 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -39,7 +39,9 @@ use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "std")] use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use ed25519_dalek::{Signer as _, Verifier as _}; +use core::convert::TryFrom; +#[cfg(feature = "full_crypto")] +use ed25519_zebra::{SigningKey, VerificationKey}; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; @@ -75,17 +77,10 @@ pub struct Public(pub [u8; 32]); /// A key pair. #[cfg(feature = "full_crypto")] -pub struct Pair(ed25519_dalek::Keypair); - -#[cfg(feature = "full_crypto")] -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(ed25519_dalek::Keypair { - public: self.0.public, - secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed"), - }) - } +#[derive(Copy, Clone)] +pub struct Pair { + public: VerificationKey, + secret: SigningKey, } impl AsRef<[u8; 32]> for Public { @@ -456,10 +451,10 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = ed25519_dalek::PublicKey::from(&secret); - Ok(Pair(ed25519_dalek::Keypair { secret, public })) + let secret = + SigningKey::try_from(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = VerificationKey::from(&secret); + Ok(Pair { secret, public }) } /// Derive a child key from a series of given junctions. 
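The new `Pair` above is a thin wrapper over `ed25519-zebra`'s `SigningKey`/`VerificationKey`. A self-contained round trip with that crate, mirroring the calls the new implementation makes (a sketch against `ed25519-zebra` 3.x; note that `verify` takes the signature first and the message second, the reverse of the old `ed25519-dalek` call):

```rust
// Sketch: sign/verify round trip with ed25519-zebra 3.x.
use ed25519_zebra::{Signature, SigningKey, VerificationKey};
use std::convert::TryFrom;

fn main() {
    let seed = [42u8; 32];
    // Seeds are plain 32-byte slices; `try_from` rejects other lengths.
    let secret = SigningKey::try_from(&seed[..]).expect("32 bytes is a valid seed");
    let public = VerificationKey::from(&secret);

    let msg = b"hello";
    let sig: Signature = secret.sign(msg);

    // Argument order: signature first, then message.
    assert!(public.verify(&sig, msg).is_ok());

    // Keys convert back into raw 32-byte arrays, which is how the new
    // by-value `seed()` above is implemented.
    let raw: [u8; 32] = secret.into();
    assert_eq!(raw, seed);
}
```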
@@ -468,7 +463,7 @@ impl TraitPair for Pair { path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.0.secret.to_bytes(); + let mut acc = self.secret.into(); for j in path { match j { DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), @@ -480,16 +475,12 @@ impl TraitPair for Pair { /// Get the public key. fn public(&self) -> Public { - let mut r = [0u8; 32]; - let pk = self.0.public.as_bytes(); - r.copy_from_slice(pk); - Public(r) + Public(self.public.into()) } /// Sign a message. fn sign(&self, message: &[u8]) -> Signature { - let r = self.0.sign(message).to_bytes(); - Signature::from_raw(r) + Signature::from_raw(self.secret.sign(message).into()) } /// Verify a signature on a message. Returns true if the signature is good. @@ -502,17 +493,17 @@ impl TraitPair for Pair { /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct /// size. Use it only if you're coming from byte buffers and need the speed. fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { + let public_key = match VerificationKey::try_from(pubkey.as_ref()) { Ok(pk) => pk, Err(_) => return false, }; - let sig = match ed25519_dalek::Signature::try_from(sig) { + let sig = match ed25519_zebra::Signature::try_from(sig) { Ok(s) => s, Err(_) => return false, }; - public_key.verify(message.as_ref(), &sig).is_ok() + public_key.verify(&sig, message.as_ref()).is_ok() } /// Return a vec filled with raw data. @@ -524,8 +515,8 @@ impl TraitPair for Pair { #[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. - pub fn seed(&self) -> &Seed { - self.0.secret.as_bytes() + pub fn seed(&self) -> Seed { + self.secret.into() } /// Exactly as `from_string` except that if no matches are found then, the the first 32 @@ -577,12 +568,12 @@ mod test { fn seed_and_derive_should_work() { let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), &seed); + assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") + hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") ); } diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index ab3334f9e4f5a..f48adc274f524 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -56,6 +56,7 @@ pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_6 pub mod crypto; pub mod hexdisplay; +pub mod defer; pub mod ecdsa; pub mod ed25519; pub mod hash; diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index d5ca1dc45fa0c..d3fa3fc86fce5 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -90,7 +90,7 @@ macro_rules! wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - $( $fn_impl )* + (|| { $( $fn_impl )* })() } $crate::to_substrate_wasm_fn_return_value(&()) @@ -118,7 +118,7 @@ macro_rules! 
wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - $( $fn_impl )* + (|| { $( $fn_impl )* })() }; $crate::to_substrate_wasm_fn_return_value(&output) diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index 5aa3d9a239aa3..a3f09536f4f5c 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -12,4 +12,4 @@ readme = "README.md" [dependencies] kvdb = "0.11.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index c7e10be32fe28..628f938880e9f 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 09a087d509416..2e271d3949dee 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -15,7 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +bytes = { version = "1.1.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.1.3", default-features = false, features = ["bytes"] } hash-db = { version = "0.15.2", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-keystore = { version = "0.12.0", default-features = false, optional = true, path = "../keystore" } @@ -29,10 +30,10 @@ sp-externalities = { version = "0.12.0", default-features = false, path = "../ex sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } futures = { version = "0.3.21", features = ["thread-pool"], optional = true } -parking_lot = { version = "0.12.0", optional = true } -secp256k1 = { version = "0.21.2", features = ["recovery", "global-context"], optional = true } +parking_lot = { version = "0.12.1", optional = true } +secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.26", default-features = false} +tracing-core = { version = "0.1.28", default-features = false} [features] default = ["std"] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index edffc37351147..7942bafcc2a1b 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -125,8 +125,8 @@ impl From for KillStorageResult { #[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. 
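The `wasm_export_functions!` change above (from `$( $fn_impl )*` to `(|| { $( $fn_impl )* })()`) wraps the user-supplied function body in an immediately-invoked closure. A plausible reading, sketched below with invented function names: a bare `return` in the body would otherwise exit the whole generated wasm export and skip the epilogue the macro appends, while inside the closure it only exits the body.

```rust
// Hypothetical illustration of why a macro wraps a user body in `(|| { ... })()`.
fn without_closure() -> u64 {
    // imagine this `if` is the user-supplied `$( $fn_impl )*`:
    if true {
        return 0 // exits the generated function, skipping the epilogue
    }
    42 // macro-appended epilogue (e.g. encoding the return value)
}

fn with_closure() -> u64 {
    (|| {
        // same user body; `return` now only leaves the closure
        if true {
            return
        }
    })();
    42 // the epilogue still runs
}

fn main() {
    assert_eq!(without_closure(), 0);
    assert_eq!(with_closure(), 42);
}
```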
- fn get(&self, key: &[u8]) -> Option<Vec<u8>> { - self.storage(key).map(|s| s.to_vec()) + fn get(&self, key: &[u8]) -> Option<bytes::Bytes> { + self.storage(key).map(|s| bytes::Bytes::from(s.to_vec())) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -1787,7 +1787,7 @@ mod tests { t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); storage::set(b"hello", b"world"); - assert_eq!(storage::get(b"hello"), Some(b"world".to_vec())); + assert_eq!(storage::get(b"hello"), Some(b"world".to_vec().into())); assert_eq!(storage::get(b"foo"), None); storage::set(b"foo", &[1, 2, 3][..]); }); @@ -1799,7 +1799,7 @@ mod tests { t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); - assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec())); + assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec().into())); }); let value = vec![7u8; 35]; @@ -1809,7 +1809,7 @@ mod tests { t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); - assert_eq!(storage::get(b"foo00"), Some(value.clone())); + assert_eq!(storage::get(b"foo00"), Some(value.clone().into())); }); } @@ -1895,6 +1895,7 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; + let pair_unused = sr25519::Pair::generate_with_phrase(None).0; crypto::start_batch_verify(); for it in 0..70 { let msg = format!("Schnorrkel {}!", it); @@ -1902,8 +1903,10 @@ mod tests { crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); } - // push invlaid - crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); + // push invalid + let msg = b"asdf!"; + let signature = pair.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1938,10 +1941,10 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { - // invalid ed25519 signature + // valid ed25519 signature crypto::start_batch_verify(); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); - assert!(!crypto::finish_batch_verify()); + assert!(crypto::finish_batch_verify()); // 2 valid ed25519 signatures crypto::start_batch_verify(); @@ -1961,12 +1964,14 @@ mod tests { // 1 valid, 1 invalid ed25519 signature crypto::start_batch_verify(); - let pair = ed25519::Pair::generate_with_phrase(None).0; + let pair1 = ed25519::Pair::generate_with_phrase(None).0; + let pair2 = ed25519::Pair::generate_with_phrase(None).0; let msg = b"Important message"; - let signature = pair.sign(msg); - crypto::ed25519_batch_verify(&signature, msg, &pair.public()); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); + let signature = pair1.sign(msg); + crypto::ed25519_batch_verify(&signature, msg, &pair1.public()); + crypto::ed25519_batch_verify(&signature, msg, &pair2.public()); assert!(!crypto::finish_batch_verify()); @@ -1993,11 +1998,13 @@ mod tests { // 1 valid sr25519, 1 invalid sr25519 crypto::start_batch_verify(); - let pair = sr25519::Pair::generate_with_phrase(None).0; + let pair1 = sr25519::Pair::generate_with_phrase(None).0; + let pair2 = sr25519::Pair::generate_with_phrase(None).0; let msg = b"Schnorrkcel!"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair.public()); + let signature = pair1.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair1.public()); + crypto::sr25519_batch_verify(&signature, msg, &pair2.public());
crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); assert!(!crypto::finish_batch_verify()); diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 8f1adb6cf81f3..982abfd09a553 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -15,6 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" -strum = { version = "0.23.0", features = ["derive"] } +strum = { version = "0.24.1", features = ["derive"] } sp-core = { version = "6.0.0", path = "../core" } sp-runtime = { version = "6.0.0", path = "../runtime" } diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 3c3b7933c50da..cbb8a22ba4dd6 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -13,11 +13,11 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.21" merlin = { version = "2.0", default-features = false } -parking_lot = { version = "0.12.0", default-features = false } +parking_lot = { version = "0.12.1", default-features = false } schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] } serde = { version = "1.0", optional = true } thiserror = "1.0" diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 5a339d069062c..8a2e901aefddf 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -81,7 +81,7 @@ pub struct Proof { /// A full leaf content stored in the offchain-db. pub trait FullLeaf: Clone + PartialEq + fmt::Debug { - /// Encode the leaf either in it's full or compact form. + /// Encode the leaf either in its full or compact form. /// /// NOTE the encoding returned here MUST be `Decode`able into `FullLeaf`. fn using_encoded R>(&self, f: F, compact: bool) -> R; @@ -167,18 +167,18 @@ impl EncodableOpaqueLeaf { } } -/// An element representing either full data or it's hash. +/// An element representing either full data or its hash. /// /// See [Compact] to see how it may be used in practice to reduce the size /// of proofs in case multiple [LeafDataProvider]s are composed together. /// This is also used internally by the MMR to differentiate leaf nodes (data) /// and inner nodes (hashes). /// -/// [DataOrHash::hash] method calculates the hash of this element in it's compact form, +/// [DataOrHash::hash] method calculates the hash of this element in its compact form, /// so should be used instead of hashing the encoded form (which will always be non-compact). #[derive(RuntimeDebug, Clone, PartialEq)] pub enum DataOrHash { - /// Arbitrary data in it's full form. + /// Arbitrary data in its full form. Data(L), /// A hash of some data. Hash(H::Output), @@ -339,7 +339,7 @@ where A: FullLeaf, B: FullLeaf, { - /// Retrieve a hash of this item in it's compact form. + /// Retrieve a hash of this item in its compact form. pub fn hash(&self) -> H::Output { self.using_encoded(::hash, true) } @@ -447,7 +447,7 @@ sp_api::decl_runtime_apis! { /// Note this function does not require any on-chain storage - the /// proof is verified against given MMR root hash. /// - /// The leaf data is expected to be encoded in it's compact form. + /// The leaf data is expected to be encoded in its compact form. 
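The `sp-io` batch-verification test changes above replace the all-zero signature as the "invalid" case (the diff flips the zero-signature expectation from invalid to valid under the new `ed25519-zebra` backend) with signatures checked against a key that did not produce them. The contract being exercised is all-or-nothing; a short sketch using the same test-only host calls, assumed to run inside the tests' externalities and task-executor setup:

```rust
// Sketch of the all-or-nothing batch contract, mirroring the tests above;
// assumes the same TestExternalities + TaskExecutor setup as those tests.
crypto::start_batch_verify();

let pair1 = ed25519::Pair::generate_with_phrase(None).0;
let pair2 = ed25519::Pair::generate_with_phrase(None).0;
let msg = b"Important message";
let signature = pair1.sign(msg);

// One entry that verifies...
crypto::ed25519_batch_verify(&signature, msg, &pair1.public());
// ...and one that cannot (the signature was made by a different key).
crypto::ed25519_batch_verify(&signature, msg, &pair2.public());

// A single bad entry fails the entire batch.
assert!(!crypto::finish_batch_verify());
```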
fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 605f2d6081a6f..602467a343884 100644 --- a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -90,7 +90,7 @@ fn generate_random_phragmen_assignment( let target = targets_to_chose_from.remove(rng.gen_range(0..targets_to_chose_from.len())); if winners.iter().all(|w| *w != target) { - winners.push(target.clone()); + winners.push(target); } (target, rng.gen_range(1 * KSM..100 * KSM)) }) diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index dd85ce9b6dfae..5a06e3f3c88ca 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -92,7 +92,7 @@ where .into_iter() .enumerate() .map(|(idx, who)| { - c_idx_cache.insert(who.clone(), idx); + c_idx_cache.insert(who, idx); _Candidate { who, ..Default::default() } }) .collect::>>(); @@ -103,7 +103,7 @@ where for v in votes { if let Some(idx) = c_idx_cache.get(&v) { candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push(_Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); + edges.push(_Edge { who: v, candidate_index: *idx, ..Default::default() }); } } _Voter { who, edges, budget: voter_stake, load: 0f64 } @@ -143,21 +143,21 @@ where } } - elected_candidates.push((winner.who.clone(), winner.approval_stake as ExtendedBalance)); + elected_candidates.push((winner.who, winner.approval_stake as ExtendedBalance)); } else { break } } for n in &mut voters { - let mut assignment = (n.who.clone(), vec![]); + let mut assignment = (n.who, vec![]); for e in &mut n.edges { if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { if c != n.who { let ratio = e.load / n.load; - assignment.1.push((e.who.clone(), ratio)); + assignment.1.push((e.who, ratio)); } } } @@ -321,7 +321,7 @@ pub(crate) fn run_and_compare( candidates.clone(), voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -368,7 +368,7 @@ pub(crate) fn build_support_map_float( let mut supports = <_SupportMap>::new(); result.winners.iter().map(|(e, _)| (e, stake_of(e) as f64)).for_each(|(e, s)| { let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(e.clone(), item); + supports.insert(*e, item); }); for (n, assignment) in result.assignments.iter_mut() { @@ -377,7 +377,7 @@ pub(crate) fn build_support_map_float( let other_stake = nominator_stake * *r; if let Some(support) = supports.get_mut(c) { support.total = support.total + other_stake; - support.others.push((n.clone(), other_stake)); + support.others.push((*n, other_stake)); } *r = other_stake; } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index 914834fbb2aef..fd7c8ef539241 100644 --- a/primitives/npos-elections/src/pjr.rs +++ b/primitives/npos-elections/src/pjr.rs @@ -481,7 +481,7 @@ mod tests { assert_eq!( candidates .iter() - .map(|c| (c.borrow().who.clone(), c.borrow().elected, c.borrow().backed_stake)) + .map(|c| (c.borrow().who, c.borrow().elected, c.borrow().backed_stake)) .collect::>(), vec![(10, false, 0), (20, true, 15), (30, false, 0), (40, true, 15)], ); diff --git a/primitives/npos-elections/src/tests.rs 
b/primitives/npos-elections/src/tests.rs index 5b88889201b31..6f2e4fca77115 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -233,7 +233,7 @@ fn phragmen_poc_works() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -289,7 +289,7 @@ fn phragmen_poc_works_with_balancing() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), Some(config), ) @@ -376,7 +376,7 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { candidates.clone(), auto_generate_self_voters(&candidates) .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -407,7 +407,7 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -439,7 +439,7 @@ fn phragmen_accuracy_on_small_scale_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -469,7 +469,7 @@ fn phragmen_accuracy_on_small_scale_no_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -505,7 +505,7 @@ fn phragmen_large_scale_test() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -532,7 +532,7 @@ fn phragmen_large_scale_test_2() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -601,7 +601,7 @@ fn elect_has_no_entry_barrier() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -622,7 +622,7 @@ fn phragmen_self_votes_should_be_kept() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -872,30 +872,15 @@ mod score { let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(1u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(1u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(2u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(2u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(3u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(3u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(4u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(4u32, 10_000),), true,); - assert_eq!( - 
is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(5u32, 10_000),), - false, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(5u32, 10_000),), false,); } #[test] diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 335eb6d6c9a0e..f4a4fe12f6c47 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -18,4 +18,4 @@ serde = { version = "1.0.136", features = ["derive"] } sp-core = { version = "6.0.0", path = "../core" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 8cd31bab559ea..a657c98381c9a 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -14,12 +14,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +bytes = { version = "1.1.0", default-features = false } sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-features = false } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.11.1", default-features = false } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index f9bf8825f9486..6ebcb7482a779 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -308,10 +308,10 @@ pub use sp_std; /// /// 1. The generated functions are not callable from the native side. /// 2. The trait as shown above is not implemented for [`Externalities`] and is instead -/// implemented for `FunctionExecutor` (from `sp-wasm-interface`). +/// implemented for `FunctionContext` (from `sp-wasm-interface`). /// /// # Disable tracing -/// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from +/// By adding `no_tracing` to the list of options you can prevent the wasm-side interface from /// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant /// for the case when that would create a circular dependency. 
You usually _do not_ want to add /// this flag, as tracing doesn't cost you anything by default anyway (it is added as a no-op) diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs index 5d895ff5b3f82..ac6f0def9cad0 100644 --- a/primitives/runtime-interface/src/pass_by.rs +++ b/primitives/runtime-interface/src/pass_by.rs @@ -248,12 +248,12 @@ impl<T: codec::Codec> PassByImpl<T> for Codec<T> { let len = len as usize; let encoded = if len == 0 { - Vec::new() + bytes::Bytes::new() } else { - unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) } + bytes::Bytes::from(unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }) }; - T::decode(&mut &encoded[..]).expect("Host to wasm values are encoded correctly; qed") + codec::decode_from_bytes(encoded).expect("Host to wasm values are encoded correctly; qed") } } diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index f0e78e0e536b9..e9b2937227db6 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -13,6 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] +bytes = { version = "1.1.0", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../../core" } sp-io = { version = "6.0.0", default-features = false, path = "../../io" } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../" } diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index f518a2e17498c..1305aef66cacc 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -60,6 +60,18 @@ pub trait TestApi { vec![0; 4 * 1024] } + fn return_option_vec() -> Option<Vec<u8>> { + let mut vec = Vec::new(); + vec.resize(16 * 1024, 0xAA); + Some(vec) + } + + fn return_option_bytes() -> Option<bytes::Bytes> { + let mut vec = Vec::new(); + vec.resize(16 * 1024, 0xAA); + Some(vec.into()) + } + /// Set the storage at key with value. fn set_storage(&mut self, key: &[u8], data: &[u8]) { self.place_storage(key.to_vec(), Some(data.to_vec())); @@ -300,4 +312,12 @@ wasm_export_functions!
{ assert_eq!(c, res.2); assert_eq!(d, res.3); } + + fn test_return_option_vec() { + test_api::return_option_vec(); + } + + fn test_return_option_bytes() { + test_api::return_option_bytes(); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index e897fc0bab71c..880d03902b421 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] tracing = "0.1.29" -tracing-core = "0.1.26" +tracing-core = "0.1.28" sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-executor-common = { version = "0.10.0-dev", path = "../../../client/executor/common" } sp-io = { version = "6.0.0", path = "../../io" } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 1ab8dbfbbff22..d1db3e064295e 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -23,7 +23,7 @@ use sp_runtime_interface::*; use sp_runtime_interface_test_wasm::{test_api::HostFunctions, wasm_binary_unwrap}; use sp_runtime_interface_test_wasm_deprecated::wasm_binary_unwrap as wasm_binary_deprecated_unwrap; -use sc_executor_common::runtime_blob::RuntimeBlob; +use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::AllocationStats}; use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions as HostFunctionsT}; use std::{ @@ -36,7 +36,7 @@ type TestExternalities = sp_state_machine::TestExternalities( binary: &[u8], method: &str, -) -> Result { +) -> (Result, Option) { let mut ext = TestExternalities::default(); let mut ext_ext = ext.ext(); @@ -44,20 +44,21 @@ fn call_wasm_method_with_result( ExtendedHostFunctions, >::new(sc_executor::WasmExecutionMethod::Interpreted, Some(8), 8, None, 2); - executor - .uncached_call( - RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), - &mut ext_ext, - false, - method, - &[], - ) - .map_err(|e| format!("Failed to execute `{}`: {}", method, e))?; - Ok(ext) + let (result, allocation_stats) = executor.uncached_call_with_allocation_stats( + RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), + &mut ext_ext, + false, + method, + &[], + ); + let result = result + .map_err(|e| format!("Failed to execute `{}`: {}", method, e)) + .map(|_| ext); + (result, allocation_stats) } fn call_wasm_method(binary: &[u8], method: &str) -> TestExternalities { - call_wasm_method_with_result::(binary, method).unwrap() + call_wasm_method_with_result::(binary, method).0.unwrap() } #[test] @@ -103,8 +104,9 @@ fn test_return_input_public_key() { #[test] fn host_function_not_found() { - let err = - call_wasm_method_with_result::<()>(wasm_binary_unwrap(), "test_return_data").unwrap_err(); + let err = call_wasm_method_with_result::<()>(wasm_binary_unwrap(), "test_return_data") + .0 + .unwrap_err(); assert!(err.contains("Instantiation: Export ")); assert!(err.contains(" not found")); @@ -236,3 +238,43 @@ fn test_tracing() { fn test_return_input_as_tuple() { call_wasm_method::(wasm_binary_unwrap(), "test_return_input_as_tuple"); } + +#[test] +fn test_returning_option_bytes_from_a_host_function_is_efficient() { + let (result, stats_vec) = call_wasm_method_with_result::( + wasm_binary_unwrap(), + "test_return_option_vec", + ); + result.unwrap(); + let (result, stats_bytes) = call_wasm_method_with_result::( + wasm_binary_unwrap(), + "test_return_option_bytes", + ); + 
result.unwrap(); + + let stats_vec = stats_vec.unwrap(); + let stats_bytes = stats_bytes.unwrap(); + + // The way we currently implement marshaling of `Option<Vec<u8>>` through + // the WASM FFI boundary from the host to the runtime requires that it is + // marshaled through SCALE. This is quite inefficient as it requires two + // memory allocations inside of the runtime: + // + // 1) the first allocation to copy the SCALE-encoded blob into the runtime; + // 2) and another allocation for the resulting `Vec<u8>` when decoding that blob. + // + // Both of these allocations are as big as the `Vec<u8>` which is being + // passed to the runtime. This is especially bad when fetching big values + // from storage, as it can lead to an out-of-memory situation. + // + // Our `Option<Bytes>` marshaling is better; it still must go through SCALE, + // and it still requires two allocations, however since `Bytes` is zero-copy + // only the first allocation is `Vec<u8>`-sized, and the second allocation + // which creates the deserialized `Bytes` is tiny, and is only necessary because + // the underlying `Bytes` buffer from which we're deserializing gets automatically + // turned into an `Arc`. + // + // So this assertion tests that deserializing `Option<Bytes>` allocates less than + // deserializing `Option<Vec<u8>>`. + assert_eq!(stats_bytes.bytes_allocated_sum + 16 * 1024 + 8, stats_vec.bytes_allocated_sum); +} diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 1493aa2324f56..9e4b36c584e91 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -32,7 +32,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] rand = "0.7.2" -serde_json = "1.0.79" +serde_json = "1.0.85" zstd = { version = "0.11.2", default-features = false } sp-api = { version = "4.0.0-dev", path = "../api" } sp-state-machine = { version = "0.12.0", path = "../state-machine" } diff --git a/primitives/runtime/src/bounded/bounded_btree_map.rs b/primitives/runtime/src/bounded/bounded_btree_map.rs index aefd168632a1e..725864b462694 100644 --- a/primitives/runtime/src/bounded/bounded_btree_map.rs +++ b/primitives/runtime/src/bounded/bounded_btree_map.rs @@ -161,6 +161,13 @@ where { self.0.remove_entry(key) } + + /// Gets a mutable iterator over the entries of the map, sorted by key. + /// + /// See [`BTreeMap::iter_mut`] for more information.
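The allocation-stats assertion above leans on `codec::decode_from_bytes` (also adopted in `pass_by.rs`), which can hand out a `Bytes` view into the input buffer instead of copying it. A small sketch of that difference, assuming the `bytes` feature of `parity-scale-codec` as enabled in the Cargo.toml changes above:

```rust
use bytes::Bytes;
use codec::Encode;

fn main() {
    // A 16 KiB payload, SCALE-encoded as a `Vec<u8>` (length-prefixed).
    let encoded = vec![0xAAu8; 16 * 1024].encode();
    let blob = Bytes::from(encoded);

    // Decoding to `Bytes` can share the underlying buffer (no payload copy)...
    let shared: Bytes = codec::decode_from_bytes(blob.clone()).unwrap();
    // ...while decoding to `Vec<u8>` must allocate and copy the payload.
    let copied: Vec<u8> = codec::decode_from_bytes(blob).unwrap();

    assert_eq!(&shared[..], &copied[..]);
}
```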
+ pub fn iter_mut(&mut self) -> sp_std::collections::btree_map::IterMut { + self.0.iter_mut() + } } impl Default for BoundedBTreeMap @@ -508,7 +515,7 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); - // but these worn't work + // but these won't work let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); assert!(b2.is_err()); @@ -517,4 +524,17 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); assert!(b2.is_err()); } + + #[test] + fn test_iter_mut() { + let mut b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + + b1.iter_mut().for_each(|(_, v)| *v *= 2); + + assert_eq!(b1, b2); + } } diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index 203cb5da91511..f670f00b96612 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -104,8 +104,7 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. -#[derive(Encode, scale_info::TypeInfo)] -#[scale_info(skip_type_params(S))] +#[derive(Encode)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); impl<'a, T, S> Default for BoundedSlice<'a, T, S> { @@ -114,6 +113,43 @@ impl<'a, T, S> Default for BoundedSlice<'a, T, S> { } } +// This can be replaced with +// #[derive(scale_info::TypeInfo)] +// #[scale_info(skip_type_params(S))] +// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 +// Tracking issues: https://github.com/paritytech/substrate/issues/11915 +impl<'a, T, S> scale_info::TypeInfo for BoundedSlice<'a, T, S> +where + &'a [T]: scale_info::TypeInfo + 'static, + PhantomData: scale_info::TypeInfo + 'static, + T: scale_info::TypeInfo + 'static, + S: 'static, +{ + type Identity = Self; + + fn type_info() -> ::scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("BoundedSlice", "sp_runtime::bounded::bounded_vec")) + .type_params(<[_]>::into_vec(Box::new([ + scale_info::TypeParameter::new( + "T", + core::option::Option::Some(::scale_info::meta_type::()), + ), + scale_info::TypeParameter::new("S", ::core::option::Option::None), + ]))) + .docs(&[ + "A bounded slice.", + "", + "Similar to a `BoundedVec`, but not owned and cannot be decoded.", + ]) + .composite( + scale_info::build::Fields::unnamed() + .field(|f| f.ty::<&'static [T]>().type_name("&'static[T]").docs(&[])) + .field(|f| f.ty::>().type_name("PhantomData").docs(&[])), + ) + } +} + // `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, // `WeakBoundedVec`, or a `Vec`. impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} @@ -297,6 +333,17 @@ impl BoundedVec { self.0.sort_by(compare) } + /// Exactly the same semantics as [`slice::sort_by_key`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: sp_std::cmp::Ord, + { + self.0.sort_by_key(f) + } + /// Exactly the same semantics as [`slice::sort`]. /// /// This is safe since sorting cannot change the number of elements in the vector. 
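`sort_by_key` (added to `BoundedVec` above) can simply delegate to the inner `Vec` because sorting permutes elements without changing their count, so the length bound is preserved. A self-contained sketch of that pattern, with `Bounded` as an illustrative stand-in for the real `BoundedVec`:

```rust
// `Bounded` is an illustrative stand-in for `BoundedVec`, not the real type.
struct Bounded<T, const S: usize>(Vec<T>);

impl<T, const S: usize> Bounded<T, S> {
    fn try_from_vec(v: Vec<T>) -> Result<Self, Vec<T>> {
        if v.len() <= S { Ok(Self(v)) } else { Err(v) }
    }

    // Sorting cannot change `len()`, so the bound `S` still holds and the
    // inner method can be exposed as-is.
    fn sort_by_key<K: Ord>(&mut self, f: impl FnMut(&T) -> K) {
        self.0.sort_by_key(f)
    }
}

fn main() {
    let mut v = Bounded::<i32, 8>::try_from_vec(vec![-5, 4, 1, -3, 2]).unwrap();
    // Sort by absolute value, as in `bounded_vec_sort_by_key_works` above.
    v.sort_by_key(|k| k.abs());
    assert_eq!(v.0, vec![1, 2, -3, 4, -5]);
}
```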
@@ -825,7 +872,7 @@ where fn max_encoded_len() -> usize { // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) @@ -1167,4 +1214,12 @@ pub mod test { b1.iter().map(|x| x + 1).rev().take(2).try_collect(); assert!(b2.is_err()); } + + #[test] + fn bounded_vec_sort_by_key_works() { + let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; + // Sort by absolute value. + v.sort_by_key(|k| k.abs()); + assert_eq!(v, vec![1, 2, -3, 4, -5]); + } } diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs index ed9f4bba62b55..a447e7285f906 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -443,7 +443,7 @@ where fn max_encoded_len() -> usize { // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index c6bfa66017870..c040b7cf517e0 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -24,7 +24,7 @@ use crate::{ use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. -#[derive(PartialEq, Eq, sp_core::RuntimeDebug, scale_info::TypeInfo)] +#[derive(PartialEq, Eq, sp_core::RuntimeDebug)] pub struct PiecewiseLinear<'a> { /// Array of points. Must be in order from the lowest abscissas to the highest. pub points: &'a [(Perbill, Perbill)], @@ -32,6 +32,36 @@ pub struct PiecewiseLinear<'a> { pub maximum: Perbill, } +// This can be replaced with +// #[derive(scale_info::TypeInfo)] +// #[scale_info(skip_type_params(S))] +// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 +// Tracking issues: https://github.com/paritytech/substrate/issues/11915 +impl scale_info::TypeInfo for PiecewiseLinear<'static> { + type Identity = Self; + fn type_info() -> ::scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("PiecewiseLinear", "sp_runtime::curve")) + .type_params(crate::Vec::new()) + .docs(&["Piecewise Linear function in [0, 1] -> [0, 1]."]) + .composite( + scale_info::build::Fields::named() + .field(|f| { + f.ty::<&'static[(Perbill, Perbill)]>() + .name("points") + .type_name("&'static[(Perbill, Perbill)]") + .docs(&["Array of points. 
Must be in order from the lowest abscissas to the highest."]) }) + .field(|f| { + f.ty::<Perbill>() + .name("maximum") + .type_name("Perbill") + .docs(&["The maximum value that can be returned."]) + }), + ) + } +} + fn abs_sub<N: Ord + Sub<Output = N> + Clone>(a: N, b: N) -> N where { a.clone().max(b.clone()) - a.min(b) } @@ -140,7 +170,7 @@ fn test_calculate_for_fraction_times_denominator() { }; pub fn formal_calculate_for_fraction_times_denominator(n: u64, d: u64) -> u64 { - if n <= Perbill::from_parts(0_500_000_000) * d.clone() { + if n <= Perbill::from_parts(0_500_000_000) * d { n + d / 2 } else { (d as u128 * 2 - n as u128 * 2).try_into().unwrap() diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 257745acbdbc3..795f2273db629 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -151,6 +151,11 @@ impl Justifications { self.iter().find(|j| j.0 == engine_id).map(|j| &j.1) } + /// Remove the encoded justification for the given consensus engine, if it exists. + pub fn remove(&mut self, engine_id: ConsensusEngineId) { + self.0.retain(|j| j.0 != engine_id) + } + /// Return a copy of the encoded justification for the given consensus /// engine, if it exists. pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option<EncodedJustification> { @@ -1126,7 +1131,7 @@ mod tests { ext.insert(b"c".to_vec(), vec![3u8; 33]); ext.insert(b"d".to_vec(), vec![4u8; 33]); - let pre_root = ext.backend.root().clone(); + let pre_root = *ext.backend.root(); let (_, proof) = ext.execute_and_prove(|| { sp_io::storage::get(b"a"); sp_io::storage::get(b"b"); diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index fba3117c41617..a82ae1d62f56a 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -55,7 +55,7 @@ pub trait Lazy<T: ?Sized> { impl<'a> Lazy<[u8]> for &'a [u8] { fn get(&mut self) -> &[u8] { - &**self + self } } diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index c81f1cd10a824..585e4b4e0dc20 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -15,4 +15,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 4261063993a52..f6517b9e9028b 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -108,11 +108,10 @@ pub trait Offence<Offender> { } /// A slash fraction of the total exposure that should be slashed for this - /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. + /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// - /// `offenders_count` - the count of unique offending authorities. It is >0. - /// `validator_set_count` - the cardinality of the validator set at the time of offence. - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; + /// `offenders_count` - the count of unique offending authorities for this `TimeSlot`. It is >0. + fn slash_fraction(&self, offenders_count: u32) -> Perbill; } /// Errors that may happen on offence reports.
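With the new `&self` signature above, `slash_fraction` implementations can derive the fraction from state the offence itself carries (such as the validator-set size at the time of the offence) rather than from a second parameter. A hypothetical sketch; the struct and the linear curve here are invented for illustration, not taken from any pallet:

```rust
use sp_runtime::Perbill;

// Hypothetical offence type; real pallets define their own.
struct ExampleOffence {
    // Captured when the offence was created, replacing the old
    // `validator_set_count` parameter.
    validator_set_count: u32,
}

impl ExampleOffence {
    // Same shape as the new trait method (minus the trait plumbing).
    fn slash_fraction(&self, offenders_count: u32) -> Perbill {
        // Invented curve: the slash grows linearly with the offender share.
        Perbill::from_rational(offenders_count, self.validator_set_count.max(1))
    }
}

fn main() {
    let offence = ExampleOffence { validator_set_count: 100 };
    assert_eq!(offence.slash_fraction(10), Perbill::from_percent(10));
}
```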
diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 1c652966180a7..5de6eb7112eea 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hash-db = { version = "0.15.2", default-features = false } log = { version = "0.4.17", optional = true } num-traits = { version = "0.2.8", default-features = false } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.7.2", optional = true } smallvec = "1.8.0" thiserror = { version = "1.0.30", optional = true } @@ -35,6 +35,7 @@ hex-literal = "0.3.4" pretty_assertions = "1.2.1" rand = "0.7.2" sp-runtime = { version = "6.0.0", path = "../runtime" } +trie-db = "0.24.0" assert_matches = "1.5" [features] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 505b53c800f9e..791183c4d7e4d 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,9 +17,11 @@ //! State machine backends. These manage the code and storage of contracts. +#[cfg(feature = "std")] +use crate::trie_backend::TrieBackend; use crate::{ - trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, - StorageCollection, StorageKey, StorageValue, UsageInfo, + trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, + StorageKey, StorageValue, UsageInfo, }; use codec::Encode; use hash_db::Hasher; @@ -46,9 +48,7 @@ pub trait Backend: sp_std::fmt::Debug { fn storage(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.storage(key).map(|v| v.map(|v| H::hash(&v))) - } + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed child storage or None if there is nothing associated. fn child_storage( @@ -62,13 +62,11 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, Self::Error> { - self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) - } + ) -> Result, Self::Error>; /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) + Ok(self.storage_hash(key)?.is_some()) } /// true if a key exists in child storage. @@ -77,7 +75,7 @@ pub trait Backend: sp_std::fmt::Debug { child_info: &ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage(child_info, key)?.is_some()) + Ok(self.child_storage_hash(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -175,10 +173,6 @@ pub trait Backend: sp_std::fmt::Debug { all } - /// Try convert into trie backend. - fn as_trie_backend(&self) -> Option<&TrieBackend> { - None - } /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. @@ -273,6 +267,16 @@ pub trait Backend: sp_std::fmt::Debug { } } +/// Something that can be converted into a [`TrieBackend`]. +#[cfg(feature = "std")] +pub trait AsTrieBackend> { + /// Type of trie backend storage. + type TrieBackendStorage: TrieBackendStorage; + + /// Return the type as [`TrieBackend`]. 
+ fn as_trie_backend(&self) -> &TrieBackend; +} + /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 6efc847bfbdb7..236a515a2412d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -17,14 +17,13 @@ //! Basic implementation for Externalities. -use crate::{Backend, StorageKey, StorageValue}; +use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, StorageChild, - TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, TrackedStorageKey, }, traits::Externalities, Blake2Hasher, @@ -35,20 +34,19 @@ use std::{ any::{Any, TypeId}, collections::BTreeMap, iter::FromIterator, - ops::Bound, }; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - inner: Storage, + overlay: OverlayedChanges, extensions: Extensions, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } + BasicExternalities { overlay: inner.into(), extensions: Default::default() } } /// New basic externalities with empty storage. @@ -57,13 +55,34 @@ impl BasicExternalities { } /// Insert key/value - pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { - self.inner.top.insert(k, v) + pub fn insert(&mut self, k: StorageKey, v: StorageValue) { + self.overlay.set_storage(k, Some(v)); } /// Consume self and returns inner storages pub fn into_storages(self) -> Storage { - self.inner + Storage { + top: self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + children_default: self + .overlay + .children() + .map(|(iter, i)| { + ( + i.storage_key().to_vec(), + sp_core::storage::StorageChild { + data: iter + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + child_info: i.clone(), + }, + ) + }) + .collect(), + } } /// Execute the given closure `f` with the externalities set and initialized with `storage`. 
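Callers of the old `Backend::as_trie_backend` had to handle a `None` and typically surfaced `ExecutionError::UnableToGenerateProof`, as the removed code in `lib.rs` below shows. With the `AsTrieBackend` trait introduced in `backend.rs` above, the conversion is encoded in the type system instead. A sketch of the new calling pattern, with paths and bounds as assumed from this diff:

```rust
use sp_state_machine::{backend::AsTrieBackend, Backend};

// The trie backend is now obtained infallibly; there is no
// `UnableToGenerateProof` error path at this stage any more.
fn storage_via_trie<B, H>(backend: &B, key: &[u8]) -> Option<Vec<u8>>
where
    B: AsTrieBackend<H>,
    H: hash_db::Hasher,
    H::Out: Ord + codec::Codec,
{
    let trie_backend = backend.as_trie_backend();
    trie_backend.storage(key).ok().flatten()
}
```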
@@ -73,13 +92,7 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let mut ext = Self { - inner: Storage { - top: std::mem::take(&mut storage.top), - children_default: std::mem::take(&mut storage.children_default), - }, - extensions: Default::default(), - }; + let mut ext = Self::new(std::mem::take(storage)); let r = ext.execute_with(f); @@ -108,15 +121,26 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) && - self.inner.children_default.eq(&other.inner.children_default) + self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == + other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + self.overlay + .children() + .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .collect::>() == + other + .overlay + .children() + .map(|(iter, i)| { + (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + }) + .collect::>() } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { fn from_iter>(iter: I) -> Self { let mut t = Self::default(); - t.inner.top.extend(iter); + iter.into_iter().for_each(|(k, v)| t.insert(k, v)); t } } @@ -128,11 +152,8 @@ impl Default for BasicExternalities { } impl From> for BasicExternalities { - fn from(hashmap: BTreeMap) -> Self { - BasicExternalities { - inner: Storage { top: hashmap, children_default: Default::default() }, - extensions: Default::default(), - } + fn from(map: BTreeMap) -> Self { + Self::from_iter(map.into_iter()) } } @@ -140,7 +161,7 @@ impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} fn storage(&self, key: &[u8]) -> Option { - self.inner.top.get(key).cloned() + self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } fn storage_hash(&self, key: &[u8]) -> Option> { @@ -148,11 +169,7 @@ impl Externalities for BasicExternalities { } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.get(key)) - .cloned() + self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { @@ -160,16 +177,13 @@ impl Externalities for BasicExternalities { } fn next_storage_key(&self, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() + self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) + self.overlay + .child_iter_after(child_info.storage_key(), key) + .find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { @@ -178,14 +192,7 @@ impl Externalities for BasicExternalities { return } - match maybe_value { - Some(value) => { - self.inner.top.insert(key, value); - }, - None => { - self.inner.top.remove(&key); - }, - } + self.overlay.set_storage(key, maybe_value) } fn place_child_storage( @@ -194,19 +201,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = 
self - .inner - .children_default - .entry(child_info.storage_key().to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }); - if let Some(value) = value { - child_map.data.insert(key, value); - } else { - child_map.data.remove(&key); - } + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( @@ -215,12 +210,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self - .inner - .children_default - .remove(child_info.storage_key()) - .map(|c| c.data.len()) - .unwrap_or(0) as u32; + let count = self.overlay.clear_child_storage(child_info); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -239,19 +229,7 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let to_remove = self - .inner - .top - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - self.inner.top.remove(&key); - } + let count = self.overlay.clear_prefix(prefix); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -262,56 +240,37 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { - let to_remove = child - .data - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - child.data.remove(&key); - } - MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } - } else { - MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } - } + let count = self.overlay.clear_child_prefix(child_info, prefix); + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } fn storage_append(&mut self, key: Vec, value: Vec) { - let current = self.inner.top.entry(key).or_default(); - crate::ext::StorageAppend::new(current).append(value); + let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); + crate::ext::StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { - let mut top = self.inner.top.clone(); - let prefixed_keys: Vec<_> = self - .inner - .children_default - .iter() - .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) - .collect(); + let mut top = self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) + .collect::>(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for (prefixed_storage_key, child_info) in prefixed_keys { + for child_info in self.overlay.children().map(|d| d.1.clone()).collect::>() { let child_root = self.child_storage_root(&child_info, state_version); if empty_hash[..] == child_root[..] 
{ - top.remove(prefixed_storage_key.as_slice()); + top.remove(child_info.prefixed_storage_key().as_slice()); } else { - top.insert(prefixed_storage_key.into_inner(), child_root); + top.insert(child_info.prefixed_storage_key().into_inner(), child_root); } } match state_version { - StateVersion::V0 => - LayoutV0::::trie_root(self.inner.top.clone()).as_ref().into(), - StateVersion::V1 => - LayoutV1::::trie_root(self.inner.top.clone()).as_ref().into(), + StateVersion::V0 => LayoutV0::::trie_root(top).as_ref().into(), + StateVersion::V1 => LayoutV1::::trie_root(top).as_ref().into(), } } @@ -320,10 +279,11 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { - let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); + if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + let delta = + data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::>() - .child_storage_root(&child.child_info, delta, state_version) + .child_storage_root(&child_info, delta, state_version) .0 } else { empty_child_trie_root::>() @@ -332,15 +292,15 @@ impl Externalities for BasicExternalities { } fn storage_start_transaction(&mut self) { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.start_transaction() } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.rollback_transaction().map_err(drop) } fn storage_commit_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.commit_transaction().map_err(drop) } fn wipe(&mut self) {} diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 6df23cdb7096e..e205e83e85572 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -19,6 +19,7 @@ use crate::{ backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, + TrieBackendBuilder, }; use codec::Codec; use hash_db::Hasher; @@ -46,7 +47,7 @@ where { let db = GenericMemoryDB::default(); // V1 is same as V0 for an empty trie. - TrieBackend::new(db, empty_trie_root::>()) + TrieBackendBuilder::new(db, empty_trie_root::>()).build() } impl TrieBackend, H> @@ -87,14 +88,14 @@ where pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); - Self::new(clone, root) + TrieBackendBuilder::new(clone, root).build() } /// Apply the given transaction to this backend and set the root to the given value. pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB) { let mut storage = sp_std::mem::take(self).into_storage(); storage.consolidate(transaction); - *self = TrieBackend::new(storage, root); + *self = TrieBackendBuilder::new(storage, root).build(); } /// Compare with another in-memory backend. 
@@ -109,7 +110,7 @@ where KF: KeyFunction + Send + Sync, { fn clone(&self) -> Self { - TrieBackend::new(self.backend_storage().clone(), *self.root()) + TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build() } } @@ -203,7 +204,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::backend::Backend; + use crate::backend::{AsTrieBackend, Backend}; use sp_core::storage::StateVersion; use sp_runtime::traits::BlakeTwo256; @@ -218,7 +219,7 @@ mod tests { vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_version, ); - let trie_backend = storage.as_trie_backend().unwrap(); + let trie_backend = storage.as_trie_backend(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index edc3db7a60e36..59488599883e9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -29,8 +29,6 @@ mod ext; mod in_memory_backend; pub(crate) mod overlayed_changes; #[cfg(feature = "std")] -mod proving_backend; -#[cfg(feature = "std")] mod read_only; mod stats; #[cfg(feature = "std")] @@ -134,7 +132,7 @@ pub use crate::{ StorageTransactionCache, StorageValue, }, stats::{StateMachineStats, UsageInfo, UsageUnit}, - trie_backend::TrieBackend, + trie_backend::{TrieBackend, TrieBackendBuilder}, trie_backend_essence::{Storage, TrieBackendStorage}, }; @@ -144,11 +142,9 @@ mod std_reexport { basic::BasicExternalities, error::{Error, ExecutionError}, in_memory_backend::{new_in_mem, new_in_mem_hash_key}, - proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, - }, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, + trie_backend::create_proof_check_backend, }; pub use sp_trie::{ trie_types::{TrieDBMutV0, TrieDBMutV1}, @@ -158,6 +154,8 @@ mod std_reexport { #[cfg(feature = "std")] mod execution { + use crate::backend::AsTrieBackend; + use super::*; use codec::{Codec, Decode, Encode}; use hash_db::Hasher; @@ -188,9 +186,6 @@ mod execution { /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; - /// Proving Trie backend with in-memory storage. - pub type InMemoryProvingBackend<'a, H> = ProvingBackend<'a, MemoryDB, H>; - /// Strategy for executing a call into the runtime. 
#[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { @@ -562,15 +557,13 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result<(Vec, StorageProof), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_execution_on_trie_backend::<_, _, _, _>( trie_backend, overlay, @@ -607,23 +600,31 @@ mod execution { Exec: CodeExecutor + 'static + Clone, Spawn: SpawnNamed + Send + 'static, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, Exec>::new( - &proving_backend, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + + let result = { + let mut sm = StateMachine::<_, H, Exec>::new( + &proving_backend, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); + + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )? + }; + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )?; - let proof = sm.backend.extract_proof(); Ok((result.into_encoded(), proof)) } @@ -639,7 +640,7 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result, Box> where - H: Hasher, + H: Hasher + 'static, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, Spawn: SpawnNamed + Send + 'static, @@ -693,15 +694,13 @@ mod execution { /// Generate storage read proof. 
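All the proving paths below now share one pattern: wrap a trie backend with a recorder via `TrieBackendBuilder`, read through it, then extract the proof. A condensed fragment of that pattern, assembled from the calls in this file (`backend` stands for any `&TrieBackend<S, H>`, e.g. one obtained via `as_trie_backend()`):

```rust
// Condensed from the code in this file; not a standalone program.
let recorder = sp_trie::recorder::Recorder::default();
let proving_backend = TrieBackendBuilder::wrap(backend)
    .with_recorder(recorder.clone())
    .build();

// Reads through the wrapper are transparently recorded, and the recorder
// can report its current size (used by the range-proof helpers here to
// enforce `size_limit`).
let _ = proving_backend.storage(b"key");
let _encoded_size = recorder.estimate_encoded_size();

// The recorded trie nodes become the storage proof.
let proof = proving_backend
    .extract_proof()
    .expect("A recorder was set and thus, a storage proof can be extracted; qed");
```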
pub fn prove_read(backend: B, keys: I) -> Result> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_read_on_trie_backend(trie_backend, keys) } @@ -829,13 +828,11 @@ mod execution { start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at) } @@ -856,7 +853,9 @@ mod execution { return Err(Box::new("Invalid start of range.")) } - let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let recorder = sp_trie::recorder::Recorder::default(); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); let mut count = 0; let mut child_roots = HashSet::new(); @@ -924,7 +923,7 @@ mod execution { // do not add two child trie with same root true } - } else if proving_backend.estimate_encoded_size() <= size_limit { + } else if recorder.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -948,7 +947,11 @@ mod execution { start_at = None; } } - Ok((proving_backend.extract_proof(), count)) + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + Ok((proof, count)) } /// Generate range storage read proof. @@ -960,13 +963,11 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(StorageProof, u32), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_range_read_with_size_on_trie_backend( trie_backend, child_info, @@ -989,7 +990,9 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let recorder = sp_trie::recorder::Recorder::default(); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); let mut count = 0; proving_backend .apply_to_key_values_while( @@ -997,7 +1000,7 @@ mod execution { prefix, start_at, |_key, _value| { - if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + if count == 0 || recorder.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -1007,7 +1010,11 @@ mod execution { false, ) .map_err(|e| Box::new(e) as Box)?; - Ok((proving_backend.extract_proof(), count)) + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + Ok((proof, count)) } /// Generate child storage read proof. 
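The range-proof helpers in these hunks all converge on one pattern: a `sp_trie::recorder::Recorder` is created up front, a clone of it is attached to a wrapping backend via `TrieBackendBuilder::wrap(..).with_recorder(..)`, and the original handle is kept so the accumulating proof size can be polled while iterating. A condensed sketch of that pattern, using only names from this diff and assuming a `trie_backend` reference and a `size_limit` are in scope:

let recorder = sp_trie::recorder::Recorder::default();
let proving_backend =
    TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build();

let mut count = 0;
let _completed = proving_backend
    .apply_to_key_values_while(
        None, // top trie, no child trie
        None, // no prefix filter
        None, // start at the first key
        |_key, _value| {
            // The recorder observes every trie node the iteration touches, so
            // its estimate tracks the proof that would be extracted right now.
            if count == 0 || recorder.estimate_encoded_size() <= size_limit {
                count += 1;
                true
            } else {
                false
            }
        },
        false,
    )
    .expect("iteration succeeds in this sketch");

// `extract_proof` now returns an `Option`; it is `Some` because a recorder was set.
let proof = proving_backend.extract_proof().expect("recorder was set; qed");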
@@ -1017,15 +1024,13 @@ mod execution { keys: I, ) -> Result> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_child_read_on_trie_backend(trie_backend, child_info, keys) } @@ -1041,13 +1046,17 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + + Ok(proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed")) } /// Generate storage read proof on pre-created trie backend. @@ -1063,13 +1072,17 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + + Ok(proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed")) } /// Check storage read proof, generated by `prove_read` call. @@ -1079,7 +1092,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1104,7 +1117,7 @@ mod execution { start_at: &[Vec], ) -> Result<(KeyValueStates, usize), Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1121,7 +1134,7 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(Vec<(Vec, Vec)>, bool), Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1142,7 +1155,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1346,7 +1359,7 @@ mod execution { #[cfg(test)] mod tests { - use super::{ext::Ext, *}; + use super::{backend::AsTrieBackend, ext::Ext, *}; use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; use assert_matches::assert_matches; use codec::{Decode, Encode}; @@ -1358,6 +1371,7 @@ mod tests { NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; + use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}; use std::{ collections::{BTreeMap, HashMap}, panic::UnwindSafe, @@ -1419,7 +1433,7 @@ mod tests { execute_works_inner(StateVersion::V1); } fn execute_works_inner(state_version: StateVersion) { - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1447,7 +1461,7 @@ mod tests { execute_works_with_native_else_wasm_inner(StateVersion::V1); } fn execute_works_with_native_else_wasm_inner(state_version: StateVersion) { - let backend = 
trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1476,7 +1490,7 @@ mod tests { } fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1520,7 +1534,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(state_version); + let mut remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let (remote_result, remote_proof) = prove_execution( &mut remote_backend, @@ -1560,7 +1574,7 @@ mod tests { b"bbb".to_vec() => b"3".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); @@ -1716,7 +1730,7 @@ mod tests { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); @@ -1732,7 +1746,7 @@ mod tests { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { @@ -1769,7 +1783,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); // For example, block initialization with event. 
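These test updates reflect the `AsTrieBackend` change: `as_trie_backend` is no longer a fallible accessor on `Backend` returning an `Option`, but an infallible method on the new trait, so the `.unwrap()` calls and the `UnableToGenerateProof` error path disappear. A sketch of what a caller looks like under the new bound (`storage_of` is an invented name for illustration; `AsTrieBackend` and `Backend` are assumed in scope from this crate):

use codec::Codec;
use hash_db::Hasher;

fn storage_of<B, H>(backend: &B, key: &[u8]) -> Option<Vec<u8>>
where
    B: AsTrieBackend<H>,
    H: Hasher,
    H::Out: Ord + Codec,
{
    // Infallible: every `AsTrieBackend` implementor is, or wraps, a `TrieBackend`.
    let trie_backend = backend.as_trie_backend();
    trie_backend.storage(key).ok().flatten()
}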
@@ -1840,7 +1854,7 @@ mod tests { let child_info = &child_info; let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1857,7 +1871,7 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1924,8 +1938,8 @@ mod tests { let trie: InMemoryBackend = (storage.clone(), StateVersion::default()).into(); - let trie_root = trie.root(); - let backend = crate::ProvingBackend::new(&trie); + let trie_root = *trie.root(); + let backend = TrieBackendBuilder::wrap(&trie).with_recorder(Default::default()).build(); let mut queries = Vec::new(); for c in 0..(5 + nb_child_trie / 2) { // random existing query @@ -1970,10 +1984,10 @@ mod tests { } } - let storage_proof = backend.extract_proof(); + let storage_proof = backend.extract_proof().expect("Failed to extract proof"); let remote_proof = test_compact(storage_proof, &trie_root); let proof_check = - create_proof_check_backend::(*trie_root, remote_proof).unwrap(); + create_proof_check_backend::(trie_root, remote_proof).unwrap(); for (child_info, key, expected) in queries { assert_eq!( @@ -1987,7 +2001,7 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { let state_version = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); @@ -1995,7 +2009,7 @@ mod tests { assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); @@ -2018,7 +2032,7 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); @@ -2035,7 +2049,7 @@ mod tests { let mut state_version = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { - let mut trie = TrieDBMutV0::from_existing(&mut mdb, &mut root).unwrap(); + let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, &mut root).build(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); 
trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash @@ -2045,7 +2059,7 @@ mod tests { } let check_proof = |mdb, root, state_version| -> StorageProof { - let remote_backend = TrieBackend::new(mdb, root); + let remote_backend = TrieBackendBuilder::new(mdb, root).build(); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally @@ -2069,7 +2083,7 @@ mod tests { // do switch state_version = StateVersion::V1; { - let mut trie = TrieDBMutV1::from_existing(&mut mdb, &mut root).unwrap(); + let mut trie = TrieDBMutBuilderV1::from_existing(&mut mdb, &mut root).build(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); // update with same value do change @@ -2088,10 +2102,10 @@ mod tests { #[test] fn prove_range_with_child_works() { let state_version = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); - let trie_backend = remote_backend.as_trie_backend().unwrap(); + let trie_backend = remote_backend.as_trie_backend(); let max_iter = 1000; let mut nb_loop = 0; loop { @@ -2138,7 +2152,7 @@ mod tests { let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), @@ -2170,9 +2184,9 @@ mod tests { .into_iter(), state_version, ); - let mut remote_storage = remote_backend.into_storage(); + let mut remote_storage = remote_backend.backend_storage().clone(); remote_storage.consolidate(transaction); - let remote_backend = TrieBackend::new(remote_storage, remote_root); + let remote_backend = TrieBackendBuilder::new(remote_storage, remote_root).build(); let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -2198,7 +2212,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(state_version); + let backend = test_trie(state_version, None, None); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); @@ -2224,7 +2238,7 @@ mod tests { b"bbb".to_vec() => b"".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); @@ -2255,7 +2269,7 @@ mod tests { struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs 
b/primitives/state-machine/src/overlayed_changes/changeset.rs index ae5d47f6bb943..e5dad7157c731 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -57,7 +57,7 @@ pub struct NotInRuntime; /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { - /// Exeuting in client mode: Removal of all transactions possible. + /// Executing in client mode: Removal of all transactions possible. Client, /// Executing in runtime mode: Transactions started by the client are protected. Runtime, @@ -95,7 +95,7 @@ pub type OverlayedChangeSet = OverlayedMap>; /// Holds a set of changes with the ability modify them using nested transactions. #[derive(Debug, Clone)] -pub struct OverlayedMap { +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. changes: BTreeMap>, /// Stores which keys are dirty per transaction. Needed in order to determine which @@ -110,7 +110,7 @@ pub struct OverlayedMap { execution_mode: ExecutionMode, } -impl Default for OverlayedMap { +impl Default for OverlayedMap { fn default() -> Self { Self { changes: BTreeMap::new(), @@ -121,6 +121,31 @@ impl Default for OverlayedMap { } } +#[cfg(feature = "std")] +impl From for OverlayedMap> { + fn from(storage: sp_core::storage::StorageMap) -> Self { + Self { + changes: storage + .into_iter() + .map(|(k, v)| { + ( + k, + OverlayedEntry { + transactions: SmallVec::from_iter([InnerValue { + value: Some(v), + extrinsics: Default::default(), + }]), + }, + ) + }) + .collect(), + dirty_keys: Default::default(), + num_client_transactions: 0, + execution_mode: ExecutionMode::Client, + } + } +} + impl Default for ExecutionMode { fn default() -> Self { Self::Client diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 746599a6768e6..001b4b656c34e 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -638,6 +638,21 @@ impl OverlayedChanges { } } +#[cfg(feature = "std")] +impl From for OverlayedChanges { + fn from(storage: sp_core::storage::Storage) -> Self { + Self { + top: storage.top.into(), + children: storage + .children_default + .into_iter() + .map(|(k, v)| (k, (v.data.into(), v.child_info))) + .collect(), + ..Default::default() + } + } +} + #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) where diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs deleted file mode 100644 index f5cf542908b10..0000000000000 --- a/primitives/state-machine/src/proving_backend.rs +++ /dev/null @@ -1,611 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Proving state machine backend. 
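Everything from here to the end of this deleted file is superseded by recorder support built into `TrieBackend` itself, so the dedicated wrapper type and its hand-written `Backend` pass-through impl are no longer needed. For callers the migration is roughly the following (a sketch, with `trie_backend` assumed in scope):

// Before: dedicated wrapper with its own `Backend` implementation.
let proving = ProvingBackend::new(&trie_backend);
let _ = proving.storage(b"key");
let proof: StorageProof = proving.extract_proof();

// After: the regular `TrieBackend`, built with a recorder attached.
let proving = TrieBackendBuilder::wrap(&trie_backend)
    .with_recorder(Default::default())
    .build();
let _ = proving.storage(b"key");
// Consumes the backend; `Some` if and only if a recorder was configured.
let proof: StorageProof = proving.extract_proof().expect("recorder was set; qed");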
- -use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, - Backend, DBValue, Error, ExecutionError, -}; -use codec::{Codec, Decode, Encode}; -use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; -use log::debug; -use parking_lot::RwLock; -use sp_core::storage::{ChildInfo, StateVersion}; -pub use sp_trie::trie_types::TrieError; -use sp_trie::{ - empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, - LayoutV1, MemoryDB, Recorder, StorageProof, -}; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; - -/// Patricia trie-based backend specialized in get value proofs. -pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, -} - -impl<'a, S, H> ProvingBackendRecorder<'a, S, H> -where - S: TrieBackendStorage, - H: Hasher, - H::Out: Codec, -{ - /// Produce proof for a key query. - pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let map_e = |e| format!("Trie lookup error: {}", e); - - // V1 is equivalent to V0 on read. - read_trie_value_with::, _, Ephemeral>( - &eph, - self.backend.root(), - key, - &mut *self.proof_recorder, - ) - .map_err(map_e) - } - - /// Produce proof for a child key query. - pub fn child_storage( - &mut self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, String> { - let storage_key = child_info.storage_key(); - let root = self - .storage(storage_key)? - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - // V1 is equivalent to V0 on empty trie - .unwrap_or_else(empty_child_trie_root::>); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let map_e = |e| format!("Trie lookup error: {}", e); - - // V1 is equivalent to V0 on read - read_child_trie_value_with::, _, _>( - child_info.keyspace(), - &eph, - root.as_ref(), - key, - &mut *self.proof_recorder, - ) - .map_err(map_e) - } - - /// Produce proof for the whole backend. - pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let mut iter = move || -> Result<(), Box>> { - let root = self.backend.root(); - // V1 and V is equivalent to V0 on read and recorder is key read. - record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) - }; - - if let Err(e) = iter() { - debug!(target: "trie", "Error while recording all keys: {}", e); - } - } -} - -#[derive(Default)] -struct ProofRecorderInner { - /// All the records that we have stored so far. - records: HashMap>, - /// The encoded size of all recorded values. - encoded_size: usize, -} - -/// Global proof recorder, act as a layer over a hash db for recording queried data. -#[derive(Clone, Default)] -pub struct ProofRecorder { - inner: Arc>>, -} - -impl ProofRecorder { - /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, val: Option) { - let mut inner = self.inner.write(); - let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { - let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); - - entry.insert(val); - encoded_size - } else { - 0 - }; - - inner.encoded_size += encoded_size; - } - - /// Returns the value at the given `key`. 
- pub fn get(&self, key: &Hash) -> Option> { - self.inner.read().records.get(key).cloned() - } - - /// Returns the estimated encoded size of the proof. - /// - /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual - /// encoded proof. - pub fn estimate_encoded_size(&self) -> usize { - let inner = self.inner.read(); - inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() - } - - /// Convert into a [`StorageProof`]. - pub fn to_storage_proof(&self) -> StorageProof { - StorageProof::new( - self.inner - .read() - .records - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())), - ) - } - - /// Reset the internal state. - pub fn reset(&self) { - let mut inner = self.inner.write(); - inner.records.clear(); - inner.encoded_size = 0; - } -} - -/// Patricia trie-based backend which also tracks all touched storage trie values. -/// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( - TrieBackend, H>, -); - -/// Trie backend storage with its proof recorder. -pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a S, - proof_recorder: ProofRecorder, -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> -where - H::Out: Codec, -{ - /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { - let proof_recorder = Default::default(); - Self::new_with_recorder(backend, proof_recorder) - } - - /// Create new proving backend with the given recorder. - pub fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, - ) -> Self { - let essence = backend.essence(); - let root = *essence.root(); - let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; - ProvingBackend(TrieBackend::new(recorder, root)) - } - - /// Extracting the gathered unordered proof. - pub fn extract_proof(&self) -> StorageProof { - self.0.essence().backend_storage().proof_recorder.to_storage_proof() - } - - /// Returns the estimated encoded size of the proof. - /// - /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual - /// encoded proof. - pub fn estimate_encoded_size(&self) -> usize { - self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() - } - - /// Clear the proof recorded data. 
- pub fn clear_recorder(&self) { - self.0.essence().backend_storage().proof_recorder.reset() - } -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage - for ProofRecorderBackend<'a, S, H> -{ - type Overlay = S::Overlay; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.get(key) { - return Ok(v) - } - - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.record(*key, backend_value.clone()); - Ok(backend_value) - } -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug - for ProvingBackend<'a, S, H> -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ProvingBackend") - } -} - -impl<'a, S, H> Backend for ProvingBackend<'a, S, H> -where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + Codec, -{ - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = S; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.storage(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.0.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.0.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.0.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.0.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.0.child_keys(child_info, prefix) - } - - fn storage_root<'b>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, Self::Transaction) - where - H::Out: Ord, - { - self.0.storage_root(delta, state_version) - } - - fn child_storage_root<'b>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, bool, Self::Transaction) - where - H::Out: Ord, - { - self.0.child_storage_root(child_info, delta, state_version) - } - - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} - - fn usage_info(&self) -> crate::stats::UsageInfo { - self.0.usage_info() - } -} - -/// Create a backend used for checking the proof., using `H` as hasher. -/// -/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
-pub fn create_proof_check_backend( - root: H::Out, - proof: StorageProof, -) -> Result, H>, Box> -where - H: Hasher, - H::Out: Codec, -{ - let db = proof.into_memory_db(); - - if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) - } else { - Err(Box::new(ExecutionError::InvalidProof)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - proving_backend::create_proof_check_backend, trie_backend::tests::test_trie, - InMemoryBackend, - }; - use sp_core::H256; - use sp_runtime::traits::BlakeTwo256; - use sp_trie::PrefixedMemoryDB; - - fn test_proving( - trie_backend: &TrieBackend, BlakeTwo256>, - ) -> ProvingBackend, BlakeTwo256> { - ProvingBackend::new(trie_backend) - } - - #[test] - fn proof_is_empty_until_value_is_read() { - proof_is_empty_until_value_is_read_inner(StateVersion::V0); - proof_is_empty_until_value_is_read_inner(StateVersion::V1); - } - fn proof_is_empty_until_value_is_read_inner(test_hash: StateVersion) { - let trie_backend = test_trie(test_hash); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); - } - - #[test] - fn proof_is_non_empty_after_value_is_read() { - proof_is_non_empty_after_value_is_read_inner(StateVersion::V0); - proof_is_non_empty_after_value_is_read_inner(StateVersion::V1); - } - fn proof_is_non_empty_after_value_is_read_inner(test_hash: StateVersion) { - let trie_backend = test_trie(test_hash); - let backend = test_proving(&trie_backend); - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); - } - - #[test] - fn proof_is_invalid_when_does_not_contains_root() { - let result = create_proof_check_backend::( - H256::from_low_u64_be(1), - StorageProof::empty(), - ); - assert!(result.is_err()); - } - - #[test] - fn passes_through_backend_calls() { - passes_through_backend_calls_inner(StateVersion::V0); - passes_through_backend_calls_inner(StateVersion::V1); - } - fn passes_through_backend_calls_inner(state_version: StateVersion) { - let trie_backend = test_trie(state_version); - let proving_backend = test_proving(&trie_backend); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = - trie_backend.storage_root(std::iter::empty(), state_version); - let (proving_root, mut proving_mdb) = - proving_backend.storage_root(std::iter::empty(), state_version); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - - #[test] - fn proof_recorded_and_checked_top() { - proof_recorded_and_checked_inner(StateVersion::V0); - proof_recorded_and_checked_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_inner(state_version: StateVersion) { - let size_content = 34; // above hashable value treshold. 
- let value_range = 0..64; - let contents = value_range - .clone() - .map(|i| (vec![i], Some(vec![i; size_content]))) - .collect::>(); - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; - value_range.clone().for_each(|i| { - assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) - }); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - value_range - .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - } - - #[test] - fn proof_recorded_and_checked_with_child() { - proof_recorded_and_checked_with_child_inner(StateVersion::V0); - proof_recorded_and_checked_with_child_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { - let child_info_1 = ChildInfo::new_default(b"sub1"); - let child_info_2 = ChildInfo::new_default(b"sub2"); - let child_info_1 = &child_info_1; - let child_info_2 = &child_info_2; - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), - (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), - ]; - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(contents, state_version); - let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - (28..65).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) - }); - (10..15).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) - }); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - - let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - 
assert_eq!(proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64]); - } - - #[test] - fn storage_proof_encoded_size_estimation_works() { - storage_proof_encoded_size_estimation_works_inner(StateVersion::V0); - storage_proof_encoded_size_estimation_works_inner(StateVersion::V1); - } - fn storage_proof_encoded_size_estimation_works_inner(state_version: StateVersion) { - let trie_backend = test_trie(state_version); - let backend = test_proving(&trie_backend); - - let check_estimation = - |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { - let storage_proof = backend.extract_proof(); - let estimation = - backend.0.essence().backend_storage().proof_recorder.estimate_encoded_size(); - - assert_eq!(storage_proof.encoded_size(), estimation); - }; - - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - check_estimation(&backend); - - assert_eq!(backend.storage(b"value1").unwrap(), Some(vec![42])); - check_estimation(&backend); - - assert_eq!(backend.storage(b"value2").unwrap(), Some(vec![24])); - check_estimation(&backend); - - assert!(backend.storage(b"doesnotexist").unwrap().is_none()); - check_estimation(&backend); - - assert!(backend.storage(b"doesnotexist2").unwrap().is_none()); - check_estimation(&backend); - } - - #[test] - fn proof_recorded_for_same_execution_should_be_deterministic() { - let storage_changes = vec![ - (H256::random(), Some(b"value1".to_vec())), - (H256::random(), Some(b"value2".to_vec())), - (H256::random(), Some(b"value3".to_vec())), - (H256::random(), Some(b"value4".to_vec())), - (H256::random(), Some(b"value5".to_vec())), - (H256::random(), Some(b"value6".to_vec())), - (H256::random(), Some(b"value7".to_vec())), - (H256::random(), Some(b"value8".to_vec())), - ]; - - let proof_recorder = - ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; - storage_changes - .clone() - .into_iter() - .for_each(|(key, val)| proof_recorder.record(key, val)); - let proof1 = proof_recorder.to_storage_proof(); - - let proof_recorder = - ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; - storage_changes - .into_iter() - .for_each(|(key, val)| proof_recorder.record(key, val)); - let proof2 = proof_recorder.to_storage_proof(); - - assert_eq!(proof1, proof2); - } -} diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 622915a2d0525..e58fb760f4d7e 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -23,7 +23,6 @@ use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, StateVersion, TrackedStorageKey}, traits::Externalities, - Blake2Hasher, }; use sp_externalities::MultiRemovalResults; use std::{ @@ -44,7 +43,10 @@ pub trait InspectState> { fn inspect_state R, R>(&self, f: F) -> R; } -impl> InspectState for B { +impl> InspectState for B +where + H::Out: Encode, +{ fn inspect_state R, R>(&self, f: F) -> R { ReadOnlyExternalities::from(self).execute_with(f) } @@ -66,7 +68,10 @@ impl<'a, H: Hasher, B: 'a + Backend> From<&'a B> for ReadOnlyExternalities<'a } } -impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> +where + H::Out: Encode, +{ /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure. 
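The new `H::Out: Encode` bounds on `InspectState` and `ReadOnlyExternalities` are a consequence of the `storage_hash`/`child_storage_hash` change in the next hunks: the read-only externalities now return the backend's own hash type, SCALE-encoded, instead of re-hashing the value with a hard-coded `Blake2Hasher`. A small usage sketch under the new bound, assuming this crate's `new_in_mem_hash_key` helper and `sp_io` for the host-style accessor:

use sp_runtime::traits::BlakeTwo256;

// `BlakeTwo256::Out` is `H256`, which implements `Encode`, so the
// `InspectState` bound is satisfied.
let backend = new_in_mem_hash_key::<BlakeTwo256>();
backend.inspect_state(|| {
    // Executes inside `ReadOnlyExternalities`: reads are served by the
    // backend, while any attempt to write panics.
    assert!(sp_io::storage::get(b"key").is_none());
});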
@@ -75,7 +80,10 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> ReadOnlyExternalities<'a, H, B> { } } -impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B> +where + H::Out: Encode, +{ fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) { panic!("Should not be used in read-only externalities!") } @@ -87,7 +95,10 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B> } fn storage_hash(&self, key: &[u8]) -> Option<Vec<u8>> { - self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) + self.backend + .storage_hash(key) + .expect("Backend failed for storage_hash in ReadOnlyExternalities") + .map(|h| h.encode()) } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option<StorageValue> { @@ -97,7 +108,10 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B> } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>> { - self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) + self.backend + .child_storage_hash(child_info, key) + .expect("Backend failed for child_storage_hash in ReadOnlyExternalities") + .map(|h| h.encode()) } fn next_storage_key(&self, key: &[u8]) -> Option<StorageKey> { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 57d4f0b4898eb..46b7573d9d1a6 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -24,7 +24,7 @@ use std::{ use crate::{ backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, - StorageTransactionCache, StorageValue, + StorageTransactionCache, StorageValue, TrieBackendBuilder, }; use hash_db::Hasher; @@ -41,8 +41,9 @@ use sp_externalities::{Extension, ExtensionStore, Extensions}; use sp_trie::StorageProof; /// Simple HashMap-based Externalities impl. -pub struct TestExternalities<H: Hasher> +pub struct TestExternalities<H> where + H: Hasher + 'static, H::Out: codec::Codec + Ord, { /// The overlay changed storage. @@ -58,8 +59,9 @@ where pub state_version: StateVersion, } -impl<H: Hasher> TestExternalities<H> +impl<H> TestExternalities<H> where + H: Hasher + 'static, H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. @@ -202,7 +204,9 @@ where /// This implementation will wipe the proof recorded in between calls. Consecutive calls will /// get their own proof from scratch. pub fn execute_and_prove<R>(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) { - let proving_backend = crate::InMemoryProvingBackend::new(&self.backend); + let proving_backend = TrieBackendBuilder::wrap(&self.backend) + .with_recorder(Default::default()) + .build(); let mut proving_ext = Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, @@ -211,7 +215,7 @@ where ); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); - let proof = proving_backend.extract_proof(); + let proof = proving_backend.extract_proof().expect("Failed to extract storage proof"); (outcome, proof) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 130b4bf178202..447c276a6049c 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -17,31 +17,182 @@ //! Trie-based state machine backend.
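Two things to watch in the rewritten file below: `TrieBackend` construction moves behind a `TrieBackendBuilder`, so a trie cache and/or proof recorder can be attached at construction time, and `TrieBackend` gains a third `C` type parameter for the cache. Typical call sites end up looking like this (sketch; `storage`, `root`, `cache` and `recorder` are assumed to be in scope):

// Owned backend over a key-value database and a state root.
let backend = TrieBackendBuilder::new(storage, root)
    .with_optional_cache(cache)       // Option<LocalTrieCache<H>>
    .with_optional_recorder(recorder) // Option<Recorder<H>>
    .build();

// Borrowing wrapper over an existing backend, e.g. to record a proof
// without rebuilding the storage; storage and cache are taken from `backend`.
let proving = TrieBackendBuilder::wrap(&backend)
    .with_recorder(Default::default())
    .build();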
+#[cfg(feature = "std")] +use crate::backend::AsTrieBackend; use crate::{ trie_backend_essence::{TrieBackendEssence, TrieBackendStorage}, Backend, StorageKey, StorageValue, }; use codec::Codec; +#[cfg(feature = "std")] +use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; use sp_std::vec::Vec; +#[cfg(feature = "std")] +use sp_trie::{cache::LocalTrieCache, recorder::Recorder}; +#[cfg(feature = "std")] +use sp_trie::{MemoryDB, StorageProof}; -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher> { - pub(crate) essence: TrieBackendEssence, +/// Dummy type to be used in `no_std`. +/// +/// This is required to have the type available for [`TrieBackendBuilder`] and [`TrieBackend`]. +#[cfg(not(feature = "std"))] +pub struct LocalTrieCache(sp_std::marker::PhantomData); + +/// Special trait to support taking the [`LocalTrieCache`] by value or by reference. +/// +/// This trait is internal use only and to emphasize this, the trait is sealed. +pub trait AsLocalTrieCache: sealed::Sealed { + /// Returns `self` as [`LocalTrieCache`]. + #[cfg(feature = "std")] + fn as_local_trie_cache(&self) -> &LocalTrieCache; +} + +impl AsLocalTrieCache for LocalTrieCache { + #[cfg(feature = "std")] + fn as_local_trie_cache(&self) -> &LocalTrieCache { + self + } +} + +#[cfg(feature = "std")] +impl AsLocalTrieCache for &LocalTrieCache { + fn as_local_trie_cache(&self) -> &LocalTrieCache { + self + } +} + +/// Special module that contains the `Sealed` trait. +mod sealed { + use super::*; + + /// A special trait which prevents externals to implement the [`AsLocalTrieCache`] outside + /// of this crate. + pub trait Sealed {} + + impl Sealed for LocalTrieCache {} + impl Sealed for &LocalTrieCache {} +} + +/// Builder for creating a [`TrieBackend`]. +pub struct TrieBackendBuilder, H: Hasher, C = LocalTrieCache> { + storage: S, + root: H::Out, + #[cfg(feature = "std")] + recorder: Option>, + cache: Option, } -impl, H: Hasher> TrieBackend +impl TrieBackendBuilder> where - H::Out: Codec, + S: TrieBackendStorage, + H: Hasher, { - /// Create new trie-based backend. + /// Create a new builder instance. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { essence: TrieBackendEssence::new(storage, root) } + Self { + storage, + root, + #[cfg(feature = "std")] + recorder: None, + cache: None, + } + } +} + +impl TrieBackendBuilder +where + S: TrieBackendStorage, + H: Hasher, +{ + /// Wrap the given [`TrieBackend`]. + /// + /// This can be used for example if all accesses to the trie should + /// be recorded while some other functionality still uses the non-recording + /// backend. + /// + /// The backend storage and the cache will be taken from `other`. + pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { + TrieBackendBuilder { + storage: other.essence.backend_storage(), + root: *other.essence.root(), + #[cfg(feature = "std")] + recorder: None, + #[cfg(feature = "std")] + cache: other.essence.trie_node_cache.as_ref(), + #[cfg(not(feature = "std"))] + cache: None, + } + } + + /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_optional_recorder(self, recorder: Option>) -> Self { + Self { recorder, ..self } } + /// Use the given `recorder` for the to be configured [`TrieBackend`]. 
+ #[cfg(feature = "std")] + pub fn with_recorder(self, recorder: Recorder) -> Self { + Self { recorder: Some(recorder), ..self } + } + + /// Use the given optional `cache` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + TrieBackendBuilder { + cache, + root: self.root, + storage: self.storage, + recorder: self.recorder, + } + } + + /// Use the given `cache` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + TrieBackendBuilder { + cache: Some(cache), + root: self.root, + storage: self.storage, + recorder: self.recorder, + } + } + + /// Build the configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn build(self) -> TrieBackend { + TrieBackend { + essence: TrieBackendEssence::new_with_cache_and_recorder( + self.storage, + self.root, + self.cache, + self.recorder, + ), + } + } + + /// Build the configured [`TrieBackend`]. + #[cfg(not(feature = "std"))] + pub fn build(self) -> TrieBackend { + let _ = self.cache; + + TrieBackend { essence: TrieBackendEssence::new(self.storage, self.root) } + } +} + +/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. +pub struct TrieBackend, H: Hasher, C = LocalTrieCache> { + pub(crate) essence: TrieBackendEssence, +} + +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> TrieBackend +where + H::Out: Codec, +{ /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -59,15 +210,26 @@ where pub fn into_storage(self) -> S { self.essence.into_storage() } + + /// Extract the [`StorageProof`]. + /// + /// This only returns `Some` when there was a recorder set. + #[cfg(feature = "std")] + pub fn extract_proof(mut self) -> Option { + self.essence.recorder.take().map(|r| r.drain_storage_proof()) + } } -impl, H: Hasher> sp_std::fmt::Debug for TrieBackend { +impl, H: Hasher, C: AsLocalTrieCache> sp_std::fmt::Debug + for TrieBackend +{ fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher> Backend for TrieBackend +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> Backend + for TrieBackend where H::Out: Ord + Codec, { @@ -75,10 +237,22 @@ where type Transaction = S::Overlay; type TrieBackendStorage = S; + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.essence.storage_hash(key) + } + fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.essence.child_storage_hash(child_info, key) + } + fn child_storage( &self, child_info: &ChildInfo, @@ -169,10 +343,6 @@ where self.essence.child_storage_root(child_info, delta, state_version) } - fn as_trie_backend(&self) -> Option<&TrieBackend> { - Some(self) - } - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::UsageInfo { @@ -184,20 +354,97 @@ where } } +#[cfg(feature = "std")] +impl, H: Hasher, C> AsTrieBackend for TrieBackend { + type TrieBackendStorage = S; + + fn as_trie_backend(&self) -> &TrieBackend { + self + } +} + +/// Create a backend used for checking the proof, using `H` as hasher. +/// +/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
+#[cfg(feature = "std")] +pub fn create_proof_check_backend( + root: H::Out, + proof: StorageProof, +) -> Result, H>, Box> +where + H: Hasher, + H::Out: Codec, +{ + let db = proof.into_memory_db(); + + if db.contains(&root, hash_db::EMPTY_PREFIX) { + Ok(TrieBackendBuilder::new(db, root).build()) + } else { + Err(Box::new(crate::ExecutionError::InvalidProof)) + } +} + #[cfg(test)] pub mod tests { + use crate::{new_in_mem, InMemoryBackend}; + use super::*; use codec::Encode; use sp_core::H256; use sp_runtime::traits::BlakeTwo256; use sp_trie::{ - trie_types::{TrieDB, TrieDBMutV0, TrieDBMutV1}, - KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieMut, + cache::{CacheSize, SharedTrieCache}, + trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1}, + KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut, }; use std::{collections::HashSet, iter}; + use trie_db::NodeCodec; const CHILD_KEY_1: &[u8] = b"sub1"; + type Recorder = sp_trie::recorder::Recorder; + type Cache = LocalTrieCache; + type SharedCache = SharedTrieCache; + + macro_rules! parameterized_test { + ($name:ident, $internal_name:ident) => { + #[test] + fn $name() { + let parameters = vec![ + (StateVersion::V0, None, None), + (StateVersion::V0, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V0, None, Some(Recorder::default())), + ( + StateVersion::V0, + Some(SharedCache::new(CacheSize::Unlimited)), + Some(Recorder::default()), + ), + (StateVersion::V1, None, None), + (StateVersion::V1, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V1, None, Some(Recorder::default())), + ( + StateVersion::V1, + Some(SharedCache::new(CacheSize::Unlimited)), + Some(Recorder::default()), + ), + ]; + + for (version, cache, recorder) in parameters { + eprintln!( + "Running with version {:?}, cache enabled {} and recorder enabled {}", + version, + cache.is_some(), + recorder.is_some() + ); + + let cache = cache.as_ref().map(|c| c.local_cache()); + + $internal_name(version, cache, recorder.clone()); + } + } + }; + } + pub(crate) fn test_db(state_version: StateVersion) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); @@ -206,12 +453,12 @@ pub mod tests { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); match state_version { StateVersion::V0 => { - let mut trie = TrieDBMutV0::new(&mut mdb, &mut root); + let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, StateVersion::V1 => { - let mut trie = TrieDBMutV1::new(&mut mdb, &mut root); + let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, @@ -240,11 +487,11 @@ pub mod tests { match state_version { StateVersion::V0 => { - let trie = TrieDBMutV0::new(&mut mdb, &mut root); + let trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); build(trie, &child_info, &sub_root[..]) }, StateVersion::V1 => { - let trie = TrieDBMutV1::new(&mut mdb, &mut root); + let trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); build(trie, &child_info, &sub_root[..]) }, }; @@ -254,27 +501,39 @@ pub mod tests { pub(crate) fn test_trie( hashed_value: StateVersion, + cache: Option, + recorder: Option, ) -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(hashed_value); - TrieBackend::new(mdb, root) 
- } - #[test] - fn read_from_storage_returns_some() { - read_from_storage_returns_some_inner(StateVersion::V0); - read_from_storage_returns_some_inner(StateVersion::V1); - } - fn read_from_storage_returns_some_inner(state_version: StateVersion) { - assert_eq!(test_trie(state_version).storage(b"key").unwrap(), Some(b"value".to_vec())); + TrieBackendBuilder::new(mdb, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() } - #[test] - fn read_from_child_storage_returns_some() { - read_from_child_storage_returns_some_inner(StateVersion::V0); - read_from_child_storage_returns_some_inner(StateVersion::V1); + parameterized_test!(read_from_storage_returns_some, read_from_storage_returns_some_inner); + fn read_from_storage_returns_some_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert_eq!( + test_trie(state_version, cache, recorder).storage(b"key").unwrap(), + Some(b"value".to_vec()) + ); } - fn read_from_child_storage_returns_some_inner(state_version: StateVersion) { - let test_trie = test_trie(state_version); + + parameterized_test!( + read_from_child_storage_returns_some, + read_from_child_storage_returns_some_inner + ); + fn read_from_child_storage_returns_some_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let test_trie = test_trie(state_version, cache, recorder); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -299,65 +558,81 @@ pub mod tests { ); } - #[test] - fn read_from_storage_returns_none() { - read_from_storage_returns_none_inner(StateVersion::V0); - read_from_storage_returns_none_inner(StateVersion::V1); - } - fn read_from_storage_returns_none_inner(state_version: StateVersion) { - assert_eq!(test_trie(state_version).storage(b"non-existing-key").unwrap(), None); + parameterized_test!(read_from_storage_returns_none, read_from_storage_returns_none_inner); + fn read_from_storage_returns_none_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert_eq!( + test_trie(state_version, cache, recorder).storage(b"non-existing-key").unwrap(), + None + ); } - #[test] - fn pairs_are_not_empty_on_non_empty_storage() { - pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V0); - pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V1); - } - fn pairs_are_not_empty_on_non_empty_storage_inner(state_version: StateVersion) { - assert!(!test_trie(state_version).pairs().is_empty()); + parameterized_test!( + pairs_are_not_empty_on_non_empty_storage, + pairs_are_not_empty_on_non_empty_storage_inner + ); + fn pairs_are_not_empty_on_non_empty_storage_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert!(!test_trie(state_version, cache, recorder).pairs().is_empty()); } #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, BlakeTwo256>::new( + assert!(TrieBackendBuilder::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), ) + .build() .pairs() .is_empty()); } - #[test] - fn storage_root_is_non_default() { - storage_root_is_non_default_inner(StateVersion::V0); - storage_root_is_non_default_inner(StateVersion::V1); - } - fn storage_root_is_non_default_inner(state_version: StateVersion) { + parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner); + fn storage_root_is_non_default_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { assert!( - test_trie(state_version).storage_root(iter::empty(), 
state_version).0 != - H256::repeat_byte(0) + test_trie(state_version, cache, recorder) + .storage_root(iter::empty(), state_version) + .0 != H256::repeat_byte(0) ); } - #[test] - fn storage_root_transaction_is_non_empty() { - storage_root_transaction_is_non_empty_inner(StateVersion::V0); - storage_root_transaction_is_non_empty_inner(StateVersion::V1); - } - fn storage_root_transaction_is_non_empty_inner(state_version: StateVersion) { - let (new_root, mut tx) = test_trie(state_version) + parameterized_test!( + storage_root_transaction_is_non_empty, + storage_root_transaction_is_non_empty_inner + ); + fn storage_root_transaction_is_non_empty_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let (new_root, mut tx) = test_trie(state_version, cache, recorder) .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie(state_version).storage_root(iter::empty(), state_version).0); + assert!( + new_root != + test_trie(state_version, None, None) + .storage_root(iter::empty(), state_version) + .0 + ); } - #[test] - fn prefix_walking_works() { - prefix_walking_works_inner(StateVersion::V0); - prefix_walking_works_inner(StateVersion::V1); - } - fn prefix_walking_works_inner(state_version: StateVersion) { - let trie = test_trie(state_version); + parameterized_test!(prefix_walking_works, prefix_walking_works_inner); + fn prefix_walking_works_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie = test_trie(state_version, cache, recorder); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { @@ -371,23 +646,475 @@ pub mod tests { assert_eq!(seen, expected); } - #[test] - fn keys_with_empty_prefix_returns_all_keys() { - keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V0); - keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V1); - } - fn keys_with_empty_prefix_returns_all_keys_inner(state_version: StateVersion) { + parameterized_test!( + keys_with_empty_prefix_returns_all_keys, + keys_with_empty_prefix_returns_all_keys_inner + ); + fn keys_with_empty_prefix_returns_all_keys_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { let (test_db, test_root) = test_db(state_version); - let expected = TrieDB::new(&test_db, &test_root) - .unwrap() + let expected = TrieDBBuilder::new(&test_db, &test_root) + .build() .iter() .unwrap() .map(|d| d.unwrap().0.to_vec()) .collect::>(); - let trie = test_trie(state_version); + let trie = test_trie(state_version, cache, recorder); let keys = trie.keys(&[]); assert_eq!(expected, keys); } + + parameterized_test!( + proof_is_empty_until_value_is_read, + proof_is_empty_until_value_is_read_inner + ); + fn proof_is_empty_until_value_is_read_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie_backend = test_trie(state_version, cache, recorder); + assert!(TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build() + .extract_proof() + .unwrap() + .is_empty()); + } + + parameterized_test!( + proof_is_non_empty_after_value_is_read, + proof_is_non_empty_after_value_is_read_inner + ); + fn proof_is_non_empty_after_value_is_read_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie_backend = test_trie(state_version, cache, recorder); + let backend = TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build(); + 
assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec()));
+        assert!(!backend.extract_proof().unwrap().is_empty());
+    }
+
+    #[test]
+    fn proof_is_invalid_when_does_not_contains_root() {
+        let result = create_proof_check_backend::<BlakeTwo256>(
+            H256::from_low_u64_be(1),
+            StorageProof::empty(),
+        );
+        assert!(result.is_err());
+    }
+
+    parameterized_test!(passes_through_backend_calls, passes_through_backend_calls_inner);
+    fn passes_through_backend_calls_inner(
+        state_version: StateVersion,
+        cache: Option<Cache>,
+        recorder: Option<Recorder>,
+    ) {
+        let trie_backend = test_trie(state_version, cache, recorder);
+        let proving_backend = TrieBackendBuilder::wrap(&trie_backend)
+            .with_recorder(Recorder::default())
+            .build();
+        assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap());
+        assert_eq!(trie_backend.pairs(), proving_backend.pairs());
+
+        let (trie_root, mut trie_mdb) =
+            trie_backend.storage_root(std::iter::empty(), state_version);
+        let (proving_root, mut proving_mdb) =
+            proving_backend.storage_root(std::iter::empty(), state_version);
+        assert_eq!(trie_root, proving_root);
+        assert_eq!(trie_mdb.drain(), proving_mdb.drain());
+    }
+
+    #[test]
+    fn proof_recorded_and_checked_top() {
+        proof_recorded_and_checked_inner(StateVersion::V0);
+        proof_recorded_and_checked_inner(StateVersion::V1);
+    }
+    fn proof_recorded_and_checked_inner(state_version: StateVersion) {
+        let size_content = 34; // above hashable value threshold.
+        let value_range = 0..64;
+        let contents = value_range
+            .clone()
+            .map(|i| (vec![i], Some(vec![i; size_content])))
+            .collect::<Vec<_>>();
+        let in_memory = InMemoryBackend::<BlakeTwo256>::default();
+        let in_memory = in_memory.update(vec![(None, contents)], state_version);
+        let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0;
+        value_range.clone().for_each(|i| {
+            assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])
+        });
+
+        let trie = in_memory.as_trie_backend();
+        let trie_root = trie.storage_root(std::iter::empty(), state_version).0;
+        assert_eq!(in_memory_root, trie_root);
+        value_range
+            .clone()
+            .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content]));
+
+        for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] {
+            // Run multiple times to exercise different cache conditions.
+            for i in 0..5 {
+                if let Some(cache) = &cache {
+                    if i == 2 {
+                        cache.reset_node_cache();
+                    } else if i == 3 {
+                        cache.reset_value_cache();
+                    }
+                }
+
+                let proving = TrieBackendBuilder::wrap(&trie)
+                    .with_recorder(Recorder::default())
+                    .with_optional_cache(cache.as_ref().map(|c| c.local_cache()))
+                    .build();
+                assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]);
+
+                let proof = proving.extract_proof().unwrap();
+
+                let proof_check =
+                    create_proof_check_backend::<BlakeTwo256>(in_memory_root.into(), proof)
+                        .unwrap();
+                assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]);
+            }
+        }
+    }
+
+    #[test]
+    fn proof_record_works_with_iter() {
+        proof_record_works_with_iter_inner(StateVersion::V0);
+        proof_record_works_with_iter_inner(StateVersion::V1);
+    }
+    fn proof_record_works_with_iter_inner(state_version: StateVersion) {
+        for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] {
+            // Run multiple times to exercise different cache conditions.
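+            // The five iterations cover distinct cache states: i == 0 and 1 warm the
+            // caches up, i == 2 drops the cached nodes, i == 3 drops the cached values,
+            // and i == 4 runs with both repopulated.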
+            for i in 0..5 {
+                if let Some(cache) = &cache {
+                    if i == 2 {
+                        cache.reset_node_cache();
+                    } else if i == 3 {
+                        cache.reset_value_cache();
+                    }
+                }
+
+                let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::<Vec<_>>();
+                let in_memory = InMemoryBackend::<BlakeTwo256>::default();
+                let in_memory = in_memory.update(vec![(None, contents)], state_version);
+                let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0;
+                (0..64)
+                    .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i]));
+
+                let trie = in_memory.as_trie_backend();
+                let trie_root = trie.storage_root(std::iter::empty(), state_version).0;
+                assert_eq!(in_memory_root, trie_root);
+                (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i]));
+
+                let proving = TrieBackendBuilder::wrap(&trie)
+                    .with_recorder(Recorder::default())
+                    .with_optional_cache(cache.as_ref().map(|c| c.local_cache()))
+                    .build();
+
+                (0..63).for_each(|i| {
+                    assert_eq!(proving.next_storage_key(&[i]).unwrap(), Some(vec![i + 1]))
+                });
+
+                let proof = proving.extract_proof().unwrap();
+
+                let proof_check =
+                    create_proof_check_backend::<BlakeTwo256>(in_memory_root.into(), proof)
+                        .unwrap();
+                (0..63).for_each(|i| {
+                    assert_eq!(proof_check.next_storage_key(&[i]).unwrap(), Some(vec![i + 1]))
+                });
+            }
+        }
+    }
+
+    #[test]
+    fn proof_recorded_and_checked_with_child() {
+        proof_recorded_and_checked_with_child_inner(StateVersion::V0);
+        proof_recorded_and_checked_with_child_inner(StateVersion::V1);
+    }
+    fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) {
+        let child_info_1 = ChildInfo::new_default(b"sub1");
+        let child_info_2 = ChildInfo::new_default(b"sub2");
+        let child_info_1 = &child_info_1;
+        let child_info_2 = &child_info_2;
+        let contents = vec![
+            (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::<Vec<_>>()),
+            (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()),
+            (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()),
+        ];
+        let in_memory = new_in_mem::<BlakeTwo256, HashKey<_>>();
+        let in_memory = in_memory.update(contents, state_version);
+        let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()];
+        let in_memory_root = in_memory
+            .full_storage_root(
+                std::iter::empty(),
+                child_storage_keys.iter().map(|k| (k, std::iter::empty())),
+                state_version,
+            )
+            .0;
+        (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i]));
+        (28..65).for_each(|i| {
+            assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i])
+        });
+        (10..15).for_each(|i| {
+            assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i])
+        });
+
+        for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] {
+            // Run multiple times to exercise different cache conditions.
+            for i in 0..5 {
+                eprintln!("Running with cache {}, iteration {}", cache.is_some(), i);
+
+                if let Some(cache) = &cache {
+                    if i == 2 {
+                        cache.reset_node_cache();
+                    } else if i == 3 {
+                        cache.reset_value_cache();
+                    }
+                }
+
+                let trie = in_memory.as_trie_backend();
+                let trie_root = trie.storage_root(std::iter::empty(), state_version).0;
+                assert_eq!(in_memory_root, trie_root);
+                (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i]));
+
+                let proving = TrieBackendBuilder::wrap(&trie)
+                    .with_recorder(Recorder::default())
+                    .with_optional_cache(cache.as_ref().map(|c| c.local_cache()))
+                    .build();
+                assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]);
+
+                let proof = proving.extract_proof().unwrap();
+
+                let proof_check =
+                    create_proof_check_backend::<BlakeTwo256>(in_memory_root.into(), proof)
+                        .unwrap();
+                assert!(proof_check.storage(&[0]).is_err());
+                assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]);
+                // Note that [41] can also be read from the proof even though it was never
+                // queried: its small value is inlined in a trie node that had to be
+                // included to prove the read of [42].
+                assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]);
+                assert_eq!(proof_check.storage(&[64]).unwrap(), None);
+
+                let proving = TrieBackendBuilder::wrap(&trie)
+                    .with_recorder(Recorder::default())
+                    .with_optional_cache(cache.as_ref().map(|c| c.local_cache()))
+                    .build();
+                assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64])));
+                assert_eq!(proving.child_storage(child_info_1, &[25]), Ok(None));
+                assert_eq!(proving.child_storage(child_info_2, &[14]), Ok(Some(vec![14])));
+                assert_eq!(proving.child_storage(child_info_2, &[25]), Ok(None));
+
+                let proof = proving.extract_proof().unwrap();
+                let proof_check =
+                    create_proof_check_backend::<BlakeTwo256>(in_memory_root.into(), proof)
+                        .unwrap();
+                assert_eq!(
+                    proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(),
+                    vec![64]
+                );
+                assert_eq!(proof_check.child_storage(child_info_1, &[25]).unwrap(), None);
+
+                assert_eq!(
+                    proof_check.child_storage(child_info_2, &[14]).unwrap().unwrap(),
+                    vec![14]
+                );
+                assert_eq!(proof_check.child_storage(child_info_2, &[25]).unwrap(), None);
+            }
+        }
+    }
+
+    /// This tests an edge case when recording a child trie access with a cache.
+    ///
+    /// The accessed value/node is in the cache, but not the nodes required to reach this value.
+    /// So, the recorder will need to traverse the trie to access these nodes from the backend
+    /// when the storage proof is generated.
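+    // Put differently: with a warm value cache the read below never touches the
+    // backend, yet the storage proof still needs every node on the path from the
+    // root to the value, so the recorder has to walk the backend again when the
+    // proof is extracted.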
+ #[test] + fn child_proof_recording_with_edge_cases_works() { + child_proof_recording_with_edge_cases_works_inner(StateVersion::V0); + child_proof_recording_with_edge_cases_works_inner(StateVersion::V1); + } + fn child_proof_recording_with_edge_cases_works_inner(state_version: StateVersion) { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_1 = &child_info_1; + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), + ( + Some(child_info_1.clone()), + (28..65) + .map(|i| (vec![i], Some(vec![i]))) + // Some big value to ensure we get a new node + .chain(std::iter::once((vec![65], Some(vec![65; 128])))) + .collect(), + ), + ]; + let in_memory = new_in_mem::>(); + let in_memory = in_memory.update(contents, state_version); + let child_storage_keys = vec![child_info_1.to_owned()]; + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ) + .0; + + let child_1_root = + in_memory.child_storage_root(child_info_1, std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend(); + let nodes = { + let backend = TrieBackendBuilder::wrap(trie).with_recorder(Default::default()).build(); + let value = backend.child_storage(child_info_1, &[65]).unwrap().unwrap(); + let value_hash = BlakeTwo256::hash(&value); + assert_eq!(value, vec![65; 128]); + + let proof = backend.extract_proof().unwrap(); + + let mut nodes = Vec::new(); + for node in proof.iter_nodes() { + let hash = BlakeTwo256::hash(&node); + // Only insert the node/value that contains the important data. + if hash != value_hash { + let node = sp_trie::NodeCodec::::decode(&node) + .unwrap() + .to_owned_node::>() + .unwrap(); + + if let Some(data) = node.data() { + if data == &vec![65; 128] { + nodes.push((hash, node)); + } + } + } else if hash == value_hash { + nodes.push((hash, trie_db::node::NodeOwned::Value(node.into(), hash))); + } + } + + nodes + }; + + let cache = SharedTrieCache::::new(CacheSize::Unlimited); + { + let local_cache = cache.local_cache(); + let mut trie_cache = local_cache.as_trie_db_cache(child_1_root); + + // Put the value/node into the cache. + for (hash, node) in nodes { + trie_cache.get_or_insert_node(hash, &mut || Ok(node.clone())).unwrap(); + + if let Some(data) = node.data() { + trie_cache.cache_value_for_key(&[65], (data.clone(), hash).into()); + } + } + } + + { + // Record the access + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_cache(cache.local_cache()) + .build(); + assert_eq!(proving.child_storage(child_info_1, &[65]), Ok(Some(vec![65; 128]))); + + let proof = proving.extract_proof().unwrap(); + // And check that we have a correct proof. 
+ let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!( + proof_check.child_storage(child_info_1, &[65]).unwrap().unwrap(), + vec![65; 128] + ); + } + } + + parameterized_test!( + storage_proof_encoded_size_estimation_works, + storage_proof_encoded_size_estimation_works_inner + ); + fn storage_proof_encoded_size_estimation_works_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let has_cache = cache.is_some(); + let trie_backend = test_trie(state_version, cache, recorder); + let keys = &[ + &b"key"[..], + &b"value1"[..], + &b"value2"[..], + &b"doesnotexist"[..], + &b"doesnotexist2"[..], + ]; + + fn check_estimation( + backend: TrieBackend< + impl TrieBackendStorage, + BlakeTwo256, + &'_ LocalTrieCache, + >, + has_cache: bool, + ) { + let estimation = backend.essence.recorder.as_ref().unwrap().estimate_encoded_size(); + let storage_proof = backend.extract_proof().unwrap(); + let storage_proof_size = + storage_proof.into_nodes().into_iter().map(|n| n.encoded_size()).sum::(); + + if has_cache { + // Estimation is not entirely correct when we have values already cached. + assert!(estimation >= storage_proof_size) + } else { + assert_eq!(storage_proof_size, estimation); + } + } + + for n in 0..keys.len() { + let backend = TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build(); + + // Read n keys + (0..n).for_each(|i| { + backend.storage(keys[i]).unwrap(); + }); + + // Check the estimation + check_estimation(backend, has_cache); + } + } + + #[test] + fn new_data_is_added_to_the_cache() { + let shared_cache = SharedTrieCache::new(CacheSize::Unlimited); + let new_data = vec![ + (&b"new_data0"[..], Some(&b"0"[..])), + (&b"new_data1"[..], Some(&b"1"[..])), + (&b"new_data2"[..], Some(&b"2"[..])), + (&b"new_data3"[..], Some(&b"3"[..])), + (&b"new_data4"[..], Some(&b"4"[..])), + ]; + + let new_root = { + let trie = test_trie(StateVersion::V1, Some(shared_cache.local_cache()), None); + trie.storage_root(new_data.clone().into_iter(), StateVersion::V1).0 + }; + + let local_cache = shared_cache.local_cache(); + let mut cache = local_cache.as_trie_db_cache(new_root); + // All the data should be cached now + for (key, value) in new_data { + assert_eq!( + value.unwrap(), + cache.lookup_value_for_key(key).unwrap().data().flatten().unwrap().as_ref() + ); + } + } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 7d910cc9602cc..dda7b51ab08c6 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -18,23 +18,32 @@ //! Trie-based state machine backend essence used to read values //! from storage. 
-use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; -use codec::Encode; +use crate::{ + backend::Consolidate, debug, trie_backend::AsLocalTrieCache, warn, StorageKey, StorageValue, +}; +use codec::Codec; use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; +#[cfg(not(feature = "std"))] +use sp_std::marker::PhantomData; use sp_std::{boxed::Box, vec::Vec}; +#[cfg(feature = "std")] +use sp_trie::recorder::Recorder; use sp_trie::{ - child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_value, - read_trie_value, - trie_types::{TrieDB, TrieError}, - DBValue, KeySpacedDB, LayoutV1 as Layout, Trie, TrieDBIterator, TrieDBKeyIterator, + child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, + read_child_trie_value, read_trie_value, + trie_types::{TrieDBBuilder, TrieError}, + DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBIterator, TrieDBKeyIterator, + TrieRecorder, }; #[cfg(feature = "std")] -use std::collections::HashMap; -#[cfg(feature = "std")] -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; + +// In this module, we only use layout for read operation and empty root, +// where V1 and V0 are equivalent. +use sp_trie::LayoutV1 as Layout; #[cfg(not(feature = "std"))] macro_rules! format { @@ -68,18 +77,21 @@ impl Cache { } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher, C> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, + #[cfg(feature = "std")] + pub(crate) trie_node_cache: Option, + #[cfg(feature = "std")] + pub(crate) recorder: Option>, + #[cfg(not(feature = "std"))] + _phantom: PhantomData, } -impl, H: Hasher> TrieBackendEssence -where - H::Out: Encode, -{ +impl, H: Hasher, C> TrieBackendEssence { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -88,6 +100,30 @@ where empty: H::hash(&[0u8]), #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), + #[cfg(feature = "std")] + trie_node_cache: None, + #[cfg(feature = "std")] + recorder: None, + #[cfg(not(feature = "std"))] + _phantom: PhantomData, + } + } + + /// Create new trie-based backend. + #[cfg(feature = "std")] + pub fn new_with_cache_and_recorder( + storage: S, + root: H::Out, + cache: Option, + recorder: Option>, + ) -> Self { + TrieBackendEssence { + storage, + root, + empty: H::hash(&[0u8]), + cache: Arc::new(RwLock::new(Cache::new())), + trie_node_cache: cache, + recorder, } } @@ -96,6 +132,11 @@ where &self.storage } + /// Get backend storage mutable reference. + pub fn backend_storage_mut(&mut self) -> &mut S { + &mut self.storage + } + /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -120,7 +161,97 @@ where pub fn into_storage(self) -> S { self.storage } +} + +impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEssence { + /// Call the given closure passing it the recorder and the cache. + /// + /// If the given `storage_root` is `None`, `self.root` will be used. 
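+    // A minimal usage sketch (illustrative, mirroring how `storage` and
+    // `storage_hash` below call this helper): every read funnels through the
+    // closure so the recorder sees each touched node and the cache can serve
+    // repeated lookups.
+    //
+    //     self.with_recorder_and_cache(None, |recorder, cache| {
+    //         let trie = TrieDBBuilder::<Layout<H>>::new(self, &self.root)
+    //             .with_optional_recorder(recorder)
+    //             .with_optional_cache(cache)
+    //             .build();
+    //         trie.get(key).map_err(|e| format!("Trie lookup error: {}", e))
+    //     });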
+ #[cfg(feature = "std")] + fn with_recorder_and_cache( + &self, + storage_root: Option, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> R, + ) -> R { + let storage_root = storage_root.unwrap_or_else(|| self.root); + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let recorder = recorder.as_mut().map(|r| r as _); + let mut cache = self + .trie_node_cache + .as_ref() + .map(|c| c.as_local_trie_cache().as_trie_db_cache(storage_root)); + let cache = cache.as_mut().map(|c| c as _); + + callback(recorder, cache) + } + + #[cfg(not(feature = "std"))] + fn with_recorder_and_cache( + &self, + _: Option, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> R, + ) -> R { + callback(None, None) + } + + /// Call the given closure passing it the recorder and the cache. + /// + /// This function must only be used when the operation in `callback` is + /// calculating a `storage_root`. It is expected that `callback` returns + /// the new storage root. This is required to register the changes in the cache + /// for the correct storage root. + #[cfg(feature = "std")] + fn with_recorder_and_cache_for_storage_root( + &self, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> (Option, R), + ) -> R { + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let recorder = recorder.as_mut().map(|r| r as _); + + let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { + let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); + + let (new_root, r) = callback(recorder, Some(&mut cache)); + + if let Some(new_root) = new_root { + cache.merge_into(local_cache.as_local_trie_cache(), new_root); + } + + r + } else { + callback(recorder, None).1 + }; + + result + } + + #[cfg(not(feature = "std"))] + fn with_recorder_and_cache_for_storage_root( + &self, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> (Option, R), + ) -> R { + callback(None, None).1 + } +} + +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> + TrieBackendEssence +where + H::Out: Codec + Ord, +{ /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. pub fn next_storage_key(&self, key: &[u8]) -> Result> { @@ -184,39 +315,82 @@ where dyn_eph = self; } - let trie = - TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(dyn_eph, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); - // The key just after the one given in input, basically `key++0`. - // Note: We are sure this is the next key if: - // * size of key has no limit (i.e. we can always add 0 to the path), - // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). - let mut potential_next_key = Vec::with_capacity(key.len() + 1); - potential_next_key.extend_from_slice(key); - potential_next_key.push(0); + let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; - iter.seek(&potential_next_key) - .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; + // The key just after the one given in input, basically `key++0`. 
+ // Note: We are sure this is the next key if: + // * size of key has no limit (i.e. we can always add 0 to the path), + // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). + let mut potential_next_key = Vec::with_capacity(key.len() + 1); + potential_next_key.extend_from_slice(key); + potential_next_key.push(0); - let next_element = iter.next(); + iter.seek(&potential_next_key) + .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; - let next_key = if let Some(next_element) = next_element { - let next_key = - next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; - Some(next_key) - } else { - None - }; + let next_element = iter.next(); + + let next_key = if let Some(next_element) = next_element { + let next_key = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; + Some(next_key) + } else { + None + }; - Ok(next_key) + Ok(next_key) + }) + } + + /// Returns the hash value + pub fn storage_hash(&self, key: &[u8]) -> Result> { + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(None, |recorder, cache| { + TrieDBBuilder::new(self, &self.root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + .get_hash(key) + .map_err(map_e) + }) } /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value::, _>(self, &self.root, key).map_err(map_e) + self.with_recorder_and_cache(None, |recorder, cache| { + read_trie_value::, _>(self, &self.root, key, recorder, cache).map_err(map_e) + }) + } + + /// Returns the hash value + pub fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Result> { + let child_root = match self.child_root(child_info)? { + Some(root) => root, + None => return Ok(None), + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_hash::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) + .map_err(map_e) + }) } /// Get the value of child storage at given key. @@ -225,15 +399,24 @@ where child_info: &ChildInfo, key: &[u8], ) -> Result> { - let root = match self.child_root(child_info)? { + let child_root = match self.child_root(child_info)? { Some(root) => root, None => return Ok(None), }; let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(child_info.keyspace(), self, &root, key) + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_value::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) .map_err(map_e) + }) } /// Retrieve all entries keys of storage and call `f` for each of those keys. 
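+
+    // Context worth noting: child tries are stored behind a `KeySpacedDB`, which is
+    // why the child reads above pass `child_info.keyspace()`. Database keys are
+    // prefixed per keyspace, so the same storage key can exist independently in the
+    // top trie and in every child trie.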
@@ -338,28 +521,33 @@ where maybe_start_at: Option<&[u8]>, ) { let mut iter = move |db| -> sp_std::result::Result<(), Box>> { - let trie = TrieDB::::new(db, root)?; - let prefix = maybe_prefix.unwrap_or(&[]); - let iter = match maybe_start_at { - Some(start_at) => - TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), - None => TrieDBKeyIterator::new_prefixed(&trie, prefix), - }?; - - for x in iter { - let key = x?; - - debug_assert!(maybe_prefix - .as_ref() - .map(|prefix| key.starts_with(prefix)) - .unwrap_or(true)); - - if !f(&key) { - break + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(db, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); + let prefix = maybe_prefix.unwrap_or(&[]); + let iter = match maybe_start_at { + Some(start_at) => + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), + None => TrieDBKeyIterator::new_prefixed(&trie, prefix), + }?; + + for x in iter { + let key = x?; + + debug_assert!(maybe_prefix + .as_ref() + .map(|prefix| key.starts_with(prefix)) + .unwrap_or(true)); + + if !f(&key) { + break + } } - } - Ok(()) + Ok(()) + }) }; let result = if let Some(child_info) = child_info { @@ -383,25 +571,30 @@ where allow_missing_nodes: bool, ) -> Result { let mut iter = move |db| -> sp_std::result::Result>> { - let trie = TrieDB::::new(db, root)?; - - let prefix = prefix.unwrap_or(&[]); - let iterator = if let Some(start_at) = start_at { - TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? - } else { - TrieDBIterator::new_prefixed(&trie, prefix)? - }; - for x in iterator { - let (key, value) = x?; - - debug_assert!(key.starts_with(prefix)); - - if !f(key, value) { - return Ok(false) + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(db, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); + + let prefix = prefix.unwrap_or(&[]); + let iterator = if let Some(start_at) = start_at { + TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? + } else { + TrieDBIterator::new_prefixed(&trie, prefix)? + }; + for x in iterator { + let (key, value) = x?; + + debug_assert!(key.starts_with(prefix)); + + if !f(key, value) { + return Ok(false) + } } - } - Ok(true) + Ok(true) + }) }; let result = if let Some(child_info) = child_info { @@ -436,14 +629,20 @@ where /// Returns all `(key, value)` pairs in the trie. pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let collect_all = || -> sp_std::result::Result<_, Box>> { - let trie = TrieDB::::new(self, &self.root)?; - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, value) = x?; - v.push((key.to_vec(), value.to_vec())); - } + self.with_recorder_and_cache(None, |recorder, cache| { + let trie = TrieDBBuilder::::new(self, self.root()) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build(); + + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } - Ok(v) + Ok(v) + }) }; match collect_all() { @@ -467,27 +666,28 @@ where &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, S::Overlay) - where - H::Out: Ord, - { + ) -> (H::Out, S::Overlay) { let mut write_overlay = S::Overlay::default(); - let mut root = self.root; - { + let root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); let res = match state_version { - StateVersion::V0 => - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), - StateVersion::V1 => - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), + StateVersion::V0 => delta_trie_root::, _, _, _, _, _>( + &mut eph, self.root, delta, recorder, cache, + ), + StateVersion::V1 => delta_trie_root::, _, _, _, _, _>( + &mut eph, self.root, delta, recorder, cache, + ), }; match res { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + Ok(ret) => (Some(ret), ret), + Err(e) => { + warn!(target: "trie", "Failed to write to trie: {}", e); + (None, self.root) + }, } - } + }); (root, write_overlay) } @@ -499,15 +699,12 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, S::Overlay) - where - H::Out: Ord, - { + ) -> (H::Out, bool, S::Overlay) { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; let mut write_overlay = S::Overlay::default(); - let mut root = match self.child_root(child_info) { + let child_root = match self.child_root(child_info) { Ok(Some(hash)) => hash, Ok(None) => default_root, Err(e) => { @@ -516,32 +713,39 @@ where }, }; - { + let new_child_root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { StateVersion::V0 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, - root, + child_root, delta, + recorder, + cache, ), StateVersion::V1 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, - root, + child_root, delta, + recorder, + cache, ), } { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + Ok(ret) => (Some(ret), ret), + Err(e) => { + warn!(target: "trie", "Failed to write to trie: {}", e); + (None, child_root) + }, } - } + }); - let is_default = root == default_root; + let is_default = new_child_root == default_root; - (root, is_default, write_overlay) + (new_child_root, is_default, write_overlay) } } @@ -615,6 +819,14 @@ pub trait TrieBackendStorage: Send + Sync { fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } +impl, H: Hasher> TrieBackendStorage for &T { + type Overlay = T::Overlay; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + (*self).get(key, prefix) + } +} + // This implementation is used by normal storage trie clients. 
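+// By contrast, the blanket `&T` implementation introduced above lets a backend be
+// borrowed rather than owned; it is what makes `TrieBackendBuilder::wrap(&backend)`
+// in the tests possible, since wrapping only needs a reference to forward `get`
+// calls to.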
#[cfg(feature = "std")] impl TrieBackendStorage for Arc> { @@ -637,7 +849,9 @@ where } } -impl, H: Hasher> AsHashDB for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> AsHashDB + for TrieBackendEssence +{ fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } @@ -646,7 +860,9 @@ impl, H: Hasher> AsHashDB for TrieBackendEs } } -impl, H: Hasher> HashDB for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> HashDB + for TrieBackendEssence +{ fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -677,7 +893,9 @@ impl, H: Hasher> HashDB for TrieBackendEsse } } -impl, H: Hasher> HashDBRef for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> + HashDBRef for TrieBackendEssence +{ fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } @@ -692,7 +910,8 @@ mod test { use super::*; use sp_core::{Blake2Hasher, H256}; use sp_trie::{ - trie_types::TrieDBMutV1 as TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut, + cache::LocalTrieCache, trie_types::TrieDBMutBuilderV1 as TrieDBMutBuilder, KeySpacedDBMut, + PrefixedMemoryDB, TrieMut, }; #[test] @@ -706,7 +925,7 @@ mod test { let mut mdb = PrefixedMemoryDB::::default(); { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); @@ -715,18 +934,18 @@ mod test { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); // reuse of root_1 implicitly assert child trie root is same // as top trie (contents must remain the same). 
- let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_2).build(); trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -734,8 +953,8 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let mdb = essence_1.backend_storage().clone(); + let essence_2 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_2); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 42701f5ad3bf1..2e8f281cd7c7b 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures-timer = { version = "3.0.2", optional = true } log = { version = "0.4.17", optional = true } diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index d305b756e2d68..c2ca57d2b5a43 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -23,7 +23,7 @@ codec = { version = "3.0.0", package = "parity-scale-codec", default-features = "derive", ] } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.26", default-features = false } +tracing-core = { version = "0.1.28", default-features = false } tracing-subscriber = { version = "0.2.25", optional = true, features = [ "tracing-log", ] } diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index a5a80528c6734..e916462675435 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index ee0c8e4ec8e29..fde84c1c58b1a 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -200,7 +200,8 @@ pub mod registration { 
let mut transaction_root = sp_trie::empty_trie_root::(); { let mut trie = - sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); + sp_trie::TrieDBMutBuilder::::new(&mut db, &mut transaction_root) + .build(); let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); for (index, chunk) in chunks.enumerate() { let index = encode_index(index as u32); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index a3754461f890e..291615c9354c1 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -18,12 +18,19 @@ name = "bench" harness = false [dependencies] +ahash = { version = "0.7.6", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } +lazy_static = { version = "1.4.0", optional = true } +lru = { version = "0.7.5", optional = true } memory-db = { version = "0.29.0", default-features = false } +nohash-hasher = { version = "0.2.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = true } -trie-db = { version = "0.23.1", default-features = false } +tracing = { version = "0.1.29", optional = true } +trie-db = { version = "0.24.0", default-features = false } trie-root = { version = "0.17.0", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -31,20 +38,27 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] criterion = "0.3.3" hex-literal = "0.3.4" -trie-bench = "0.30.0" +trie-bench = "0.31.0" trie-standardmap = "0.15.2" sp-runtime = { version = "6.0.0", path = "../runtime" } [features] default = ["std"] std = [ + "ahash", "codec/std", + "hashbrown", "hash-db/std", + "lazy_static", + "lru", "memory-db/std", + "nohash-hasher", + "parking_lot", "scale-info/std", "sp-core/std", "sp-std/std", "thiserror", + "tracing", "trie-db/std", "trie-root/std", ] diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs new file mode 100644 index 0000000000000..f6a447763a80e --- /dev/null +++ b/primitives/trie/src/cache/mod.rs @@ -0,0 +1,692 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Trie Cache +//! +//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait. +//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and +//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire +//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then +//! 
there is the [`LocalTrieCache`] that should be kept around per state instance requested from the
+//! backend. As there are very likely multiple accesses to the state per instance, this
+//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the
+//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to
+//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache
+//! requests. If neither of them provides the requested data, it will be inserted into the
+//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`].
+//!
+//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never
+//! runs above this limit. However, as long as data is cached inside a [`LocalTrieCache`] it isn't
+//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a
+//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum.
+
+use crate::{Error, NodeCodec};
+use hash_db::Hasher;
+use hashbrown::HashSet;
+use nohash_hasher::BuildNoHashHasher;
+use parking_lot::{Mutex, MutexGuard, RwLockReadGuard};
+use shared_cache::{SharedValueCache, ValueCacheKey};
+use std::{
+    collections::{hash_map::Entry as MapEntry, HashMap},
+    sync::Arc,
+};
+use trie_db::{node::NodeOwned, CachedValue};
+
+mod shared_cache;
+
+pub use shared_cache::SharedTrieCache;
+
+use self::shared_cache::{SharedTrieCacheInner, ValueCacheKeyHash};
+
+const LOG_TARGET: &str = "trie-cache";
+
+/// The size of the cache.
+#[derive(Debug, Clone, Copy)]
+pub enum CacheSize {
+    /// Do not limit the cache size.
+    Unlimited,
+    /// Let the cache use at most the given number of bytes.
+    Maximum(usize),
+}
+
+impl CacheSize {
+    /// Returns `true` if the `current_size` exceeds the allowed size.
+    fn exceeds(&self, current_size: usize) -> bool {
+        match self {
+            Self::Unlimited => false,
+            Self::Maximum(max) => *max < current_size,
+        }
+    }
+}
+
+/// The local trie cache.
+///
+/// This cache should be used per state instance created by the backend. One state instance
+/// refers to the state of one block. It will cache all the accesses that are done to the state
+/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged
+/// back to the shared trie cache when this instance is dropped.
+///
+/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock internal
+/// mutexes. It is therefore important not to call these methods again while a returned cache is
+/// still alive, because doing so deadlocks.
+pub struct LocalTrieCache<H: Hasher> {
+    /// The shared trie cache that created this instance.
+    shared: SharedTrieCache<H>,
+    /// The local cache for the trie nodes.
+    node_cache: Mutex<HashMap<H::Out, NodeOwned<H::Out>>>,
+    /// Keeps track of all the trie nodes accessed in the shared cache.
+    ///
+    /// This will be used to ensure that these nodes are brought to the front of the lru when this
+    /// local instance is merged back to the shared cache.
+    shared_node_cache_access: Mutex<HashSet<H::Out>>,
+    /// The local cache for the values.
+    value_cache: Mutex<
+        HashMap<
+            ValueCacheKey<'static, H::Out>,
+            CachedValue<H::Out>,
+            BuildNoHashHasher<ValueCacheKeyHash>,
+        >,
+    >,
+    /// Keeps track of all values accessed in the shared cache.
+    ///
+    /// This will be used to ensure that these values are brought to the front of the lru when this
+    /// local instance is merged back to the shared cache.
This can actually lead to collision when + /// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However, + /// as we only use this set to update the lru position it is fine, even if we bring the wrong + /// value to the top. The important part is that we always get the correct value from the value + /// cache for a given key. + shared_value_cache_access: + Mutex>>, +} + +impl LocalTrieCache { + /// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache. + /// + /// The given `storage_root` needs to be the storage root of the trie this cache is used for. + pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> { + let shared_inner = self.shared.read_lock_inner(); + + let value_cache = ValueCache::ForStorageRoot { + storage_root, + local_value_cache: self.value_cache.lock(), + shared_value_cache_access: self.shared_value_cache_access.lock(), + }; + + TrieCache { + shared_inner, + local_cache: self.node_cache.lock(), + value_cache, + shared_node_cache_access: self.shared_node_cache_access.lock(), + } + } + + /// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache. + /// + /// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained + /// the new storage root, [`TrieCache::merge_into`] should be called to update this local + /// cache instance. If the function is not called, cached data is just thrown away and not + /// propagated to the shared cache. So, accessing these new items will be slower, but nothing + /// would break because of this. + pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> { + TrieCache { + shared_inner: self.shared.read_lock_inner(), + local_cache: self.node_cache.lock(), + value_cache: ValueCache::Fresh(Default::default()), + shared_node_cache_access: self.shared_node_cache_access.lock(), + } + } +} + +impl Drop for LocalTrieCache { + fn drop(&mut self) { + let mut shared_inner = self.shared.write_lock_inner(); + + shared_inner + .node_cache_mut() + .update(self.node_cache.lock().drain(), self.shared_node_cache_access.lock().drain()); + + shared_inner + .value_cache_mut() + .update(self.value_cache.lock().drain(), self.shared_value_cache_access.lock().drain()); + } +} + +/// The abstraction of the value cache for the [`TrieCache`]. +enum ValueCache<'a, H> { + /// The value cache is fresh, aka not yet associated to any storage root. + /// This is used for example when a new trie is being build, to cache new values. + Fresh(HashMap, CachedValue>), + /// The value cache is already bound to a specific storage root. + ForStorageRoot { + shared_value_cache_access: MutexGuard< + 'a, + HashSet>, + >, + local_value_cache: MutexGuard< + 'a, + HashMap< + ValueCacheKey<'static, H>, + CachedValue, + nohash_hasher::BuildNoHashHasher>, + >, + >, + storage_root: H, + }, +} + +impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { + /// Get the value for the given `key`. + fn get<'a>( + &'a mut self, + key: &[u8], + shared_value_cache: &'a SharedValueCache, + ) -> Option<&CachedValue> { + match self { + Self::Fresh(map) => map.get(key), + Self::ForStorageRoot { local_value_cache, shared_value_cache_access, storage_root } => { + let key = ValueCacheKey::new_ref(key, *storage_root); + + // We first need to look up in the local cache and then the shared cache. + // It can happen that some value is cached in the shared cache, but the + // weak reference of the data can not be upgraded anymore. 
This for example + // happens when the node is dropped that contains the strong reference to the data. + // + // So, the logic of the trie would lookup the data and the node and store both + // in our local caches. + local_value_cache + .get(unsafe { + // SAFETY + // + // We need to convert the lifetime to make the compiler happy. However, as + // we only use the `key` to looking up the value this lifetime conversion is + // safe. + std::mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>( + &key, + ) + }) + .or_else(|| { + shared_value_cache.get(&key).map(|v| { + shared_value_cache_access.insert(key.get_hash()); + v + }) + }) + }, + } + } + + /// Insert some new `value` under the given `key`. + fn insert(&mut self, key: &[u8], value: CachedValue) { + match self { + Self::Fresh(map) => { + map.insert(key.into(), value); + }, + Self::ForStorageRoot { local_value_cache, storage_root, .. } => { + local_value_cache.insert(ValueCacheKey::new_value(key, *storage_root), value); + }, + } + } +} + +/// The actual [`TrieCache`](trie_db::TrieCache) implementation. +/// +/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to +/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are +/// done. +pub struct TrieCache<'a, H: Hasher> { + shared_inner: RwLockReadGuard<'a, SharedTrieCacheInner>, + shared_node_cache_access: MutexGuard<'a, HashSet>, + local_cache: MutexGuard<'a, HashMap>>, + value_cache: ValueCache<'a, H::Out>, +} + +impl<'a, H: Hasher> TrieCache<'a, H> { + /// Merge this cache into the given [`LocalTrieCache`]. + /// + /// This function is only required to be called when this instance was created through + /// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given + /// `storage_root` is the new storage root that was obtained after finishing all operations + /// using the [`TrieDBMut`](trie_db::TrieDBMut). + pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { + let cache = if let ValueCache::Fresh(cache) = self.value_cache { cache } else { return }; + + if !cache.is_empty() { + let mut value_cache = local.value_cache.lock(); + let partial_hash = ValueCacheKey::hash_partial_data(&storage_root); + + cache + .into_iter() + .map(|(k, v)| { + let hash = + ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k); + (ValueCacheKey::Value { storage_key: k, storage_root, hash }, v) + }) + .for_each(|(k, v)| { + value_cache.insert(k, v); + }); + } + } +} + +impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { + fn get_or_insert_node( + &mut self, + hash: H::Out, + fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, + ) -> trie_db::Result<&NodeOwned, H::Out, Error> { + if let Some(res) = self.shared_inner.node_cache().get(&hash) { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); + self.shared_node_cache_access.insert(hash); + return Ok(res) + } + + match self.local_cache.entry(hash) { + MapEntry::Occupied(res) => { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); + Ok(res.into_mut()) + }, + MapEntry::Vacant(vacant) => { + let node = (*fetch_node)(); + + tracing::trace!( + target: LOG_TARGET, + ?hash, + fetch_successful = node.is_ok(), + "Node not found, needed to fetch it." 
+ ); + + Ok(vacant.insert(node?)) + }, + } + } + + fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned> { + if let Some(node) = self.shared_inner.node_cache().get(hash) { + tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); + self.shared_node_cache_access.insert(*hash); + return Some(node) + } + + let res = self.local_cache.get(hash); + + tracing::trace!( + target: LOG_TARGET, + ?hash, + found = res.is_some(), + "Getting node from local cache" + ); + + res + } + + fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { + let res = self.value_cache.get(key, self.shared_inner.value_cache()); + + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&key), + found = res.is_some(), + "Looked up value for key", + ); + + res + } + + fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue) { + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&key), + "Caching value for key", + ); + + self.value_cache.insert(key.into(), data); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + + type MemoryDB = crate::MemoryDB; + type Layout = crate::LayoutV1; + type Cache = super::SharedTrieCache; + type Recorder = crate::recorder::Recorder; + + const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])]; + const CACHE_SIZE_RAW: usize = 1024 * 10; + const CACHE_SIZE: CacheSize = CacheSize::Maximum(CACHE_SIZE_RAW); + + fn create_trie() -> (MemoryDB, TrieHash) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root) + } + + #[test] + fn basic_cache_works() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + let local_cache = shared_cache.local_cache(); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + } + + // Local cache wasn't dropped yet, so there should nothing in the shared caches. + assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty()); + assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty()); + + drop(local_cache); + + // Now we should have the cached items in the shared cache. + assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1); + let cached_data = shared_cache + .read_lock_inner() + .value_cache() + .lru + .peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root)) + .unwrap() + .clone(); + assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap()); + + let fake_data = Bytes::from(&b"fake_data"[..]); + + let local_cache = shared_cache.local_cache(); + shared_cache.write_lock_inner().value_cache_mut().lru.put( + ValueCacheKey::new_value(TEST_DATA[1].0, root), + (fake_data.clone(), Default::default()).into(), + ); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + // We should now get the "fake_data", because we inserted this manually to the cache. 
+ assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap()); + } + } + + #[test] + fn trie_db_mut_cache_works() { + let (mut db, root) = create_trie(); + + let new_key = b"new_key".to_vec(); + // Use some long value to not have it inlined + let new_value = vec![23; 64]; + + let shared_cache = Cache::new(CACHE_SIZE); + let mut new_root = root; + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_mut_cache(); + + { + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + .with_cache(&mut cache) + .build(); + + trie.insert(&new_key, &new_value).unwrap(); + } + + cache.merge_into(&local_cache, new_root); + } + + // After the local cache is dropped, all changes should have been merged back to the shared + // cache. + let cached_data = shared_cache + .read_lock_inner() + .value_cache() + .lru + .peek(&ValueCacheKey::new_value(new_key, new_root)) + .unwrap() + .clone(); + assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap()); + } + + #[test] + fn trie_db_cache_and_recorder_work_together() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + for i in 0..5 { + // Clear some of the caches. + if i == 2 { + shared_cache.reset_node_cache(); + } else if i == 3 { + shared_cache.reset_value_cache(); + } + + let local_cache = shared_cache.local_cache(); + let recorder = Recorder::default(); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let mut recorder = recorder.as_trie_recorder(); + let trie = TrieDBBuilder::::new(&db, &root) + .with_cache(&mut cache) + .with_recorder(&mut recorder) + .build(); + + for (key, value) in TEST_DATA { + assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + } + } + + let storage_proof = recorder.drain_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + { + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + + for (key, value) in TEST_DATA { + assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + } + } + } + } + + #[test] + fn trie_db_mut_cache_and_recorder_work_together() { + const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])]; + + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + // Run this twice so that we use the data cache in the second run. + for i in 0..5 { + // Clear some of the caches. 
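+            // Iterations 2 and 3 drop the node cache and the value cache respectively,
+            // so this loop exercises cold, partially warm and fully warm cache states.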
+ if i == 2 { + shared_cache.reset_node_cache(); + } else if i == 3 { + shared_cache.reset_value_cache(); + } + + let recorder = Recorder::default(); + let local_cache = shared_cache.local_cache(); + let mut new_root = root; + + { + let mut db = db.clone(); + let mut cache = local_cache.as_trie_db_cache(root); + let mut recorder = recorder.as_trie_recorder(); + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + .with_cache(&mut cache) + .with_recorder(&mut recorder) + .build(); + + for (key, value) in DATA_TO_ADD { + trie.insert(key, value).unwrap(); + } + } + + let storage_proof = recorder.drain_storage_proof(); + let mut memory_db: MemoryDB = storage_proof.into_memory_db(); + let mut proof_root = root; + + { + let mut trie = + TrieDBMutBuilder::::from_existing(&mut memory_db, &mut proof_root) + .build(); + + for (key, value) in DATA_TO_ADD { + trie.insert(key, value).unwrap(); + } + } + + assert_eq!(new_root, proof_root) + } + } + + #[test] + fn cache_lru_works() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA { + trie.get(k).unwrap().unwrap(); + } + } + + // Check that all items are there. + assert!(shared_cache + .read_lock_inner() + .value_cache() + .lru + .iter() + .map(|d| d.0) + .all(|l| TEST_DATA.iter().any(|d| l.storage_key().unwrap() == d.0))); + + // Run this in a loop. The first time we check that with the filled value cache, + // the expected values are at the top of the LRU. + // The second run is using an empty value cache to ensure that we access the nodes. + for _ in 0..2 { + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA.iter().take(2) { + trie.get(k).unwrap().unwrap(); + } + } + + // Ensure that the accessed items are most recently used items of the shared value + // cache. + assert!(shared_cache + .read_lock_inner() + .value_cache() + .lru + .iter() + .take(2) + .map(|d| d.0) + .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); + + // Delete the value cache, so that we access the nodes. + shared_cache.reset_value_cache(); + } + + let most_recently_used_nodes = shared_cache + .read_lock_inner() + .node_cache() + .lru + .iter() + .map(|d| *d.0) + .collect::>(); + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA.iter().skip(2) { + trie.get(k).unwrap().unwrap(); + } + } + + // Ensure that the most recently used nodes changed as well. 
+		assert_ne!(
+			most_recently_used_nodes,
+			shared_cache
+				.read_lock_inner()
+				.node_cache()
+				.lru
+				.iter()
+				.map(|d| *d.0)
+				.collect::<Vec<_>>()
+		);
+	}
+
+	#[test]
+	fn cache_respects_bounds() {
+		let (mut db, root) = create_trie();
+
+		let shared_cache = Cache::new(CACHE_SIZE);
+		{
+			let local_cache = shared_cache.local_cache();
+
+			let mut new_root = root;
+
+			{
+				let mut cache = local_cache.as_trie_db_cache(root);
+				{
+					let mut trie =
+						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
+							.with_cache(&mut cache)
+							.build();
+
+					let value = vec![10u8; 100];
+					// Ensure we add enough data that would overflow the cache.
+					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
+						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
+					}
+				}
+
+				cache.merge_into(&local_cache, new_root);
+			}
+		}
+
+		let node_cache_size = shared_cache.read_lock_inner().node_cache().size_in_bytes;
+		let value_cache_size = shared_cache.read_lock_inner().value_cache().size_in_bytes;
+
+		assert!(node_cache_size + value_cache_size < CACHE_SIZE_RAW);
+	}
+}
diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs
new file mode 100644
index 0000000000000..abac8c9f946ca
--- /dev/null
+++ b/primitives/trie/src/cache/shared_cache.rs
@@ -0,0 +1,677 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`]
+//! that combines both caches and is exported to the outside.
+
+use super::{CacheSize, LOG_TARGET};
+use hash_db::Hasher;
+use hashbrown::{hash_set::Entry as SetEntry, HashSet};
+use lru::LruCache;
+use nohash_hasher::BuildNoHashHasher;
+use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+use std::{
+	hash::{BuildHasher, Hasher as _},
+	mem,
+	sync::Arc,
+};
+use trie_db::{node::NodeOwned, CachedValue};
+
+lazy_static::lazy_static! {
+	static ref RANDOM_STATE: ahash::RandomState = ahash::RandomState::default();
+}
+
+/// No hashing [`LruCache`].
+type NoHashingLruCache<K, T> = lru::LruCache<K, T, BuildNoHashHasher<K>>;
+
+/// The shared node cache.
+///
+/// Internally this stores all cached nodes in a [`LruCache`]. It ensures that, when updated,
+/// the cache stays within its allowed bounds.
+pub(super) struct SharedNodeCache<H> {
+	/// The cached nodes, ordered by least recently used.
+	pub(super) lru: LruCache<H, NodeOwned<H>>,
+	/// The size of [`Self::lru`] in bytes.
+	pub(super) size_in_bytes: usize,
+	/// The maximum cache size of [`Self::lru`].
+	maximum_cache_size: CacheSize,
+}
+
+impl<H: AsRef<[u8]> + Eq + std::hash::Hash> SharedNodeCache<H> {
+	/// Create a new instance.
+	fn new(cache_size: CacheSize) -> Self {
+		Self { lru: LruCache::unbounded(), size_in_bytes: 0, maximum_cache_size: cache_size }
+	}
+
+	/// Get the node for `key`.
+	///
+	/// This doesn't change the least recently used order in the internal [`LruCache`].
+	pub fn get(&self, key: &H) -> Option<&NodeOwned<H>> {
+		self.lru.peek(key)
+	}
+
+	/// Update the cache with the `added` nodes and the `accessed` nodes.
+	///
+	/// The `added` nodes are the ones that have been collected by doing operations on the trie and
+	/// now should be stored in the shared cache. The `accessed` nodes are only referenced by hash
+	/// and represent the nodes that were retrieved from this shared cache through [`Self::get`].
+	/// These `accessed` nodes are being put to the front of the internal [`LruCache`] like the
+	/// `added` ones.
+	///
+	/// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`]
+	/// is inside its bounds ([`Self::maximum_cache_size`]).
+	pub fn update(
+		&mut self,
+		added: impl IntoIterator<Item = (H, NodeOwned<H>)>,
+		accessed: impl IntoIterator<Item = H>,
+	) {
+		let update_size_in_bytes = |size_in_bytes: &mut usize, key: &H, node: &NodeOwned<H>| {
+			if let Some(new_size_in_bytes) =
+				size_in_bytes.checked_sub(key.as_ref().len() + node.size_in_bytes())
+			{
+				*size_in_bytes = new_size_in_bytes;
+			} else {
+				*size_in_bytes = 0;
+				tracing::error!(target: LOG_TARGET, "`SharedNodeCache` underflow detected!");
+			}
+		};
+
+		accessed.into_iter().for_each(|key| {
+			// Access every node in the lru to put it to the front.
+			self.lru.get(&key);
+		});
+		added.into_iter().for_each(|(key, node)| {
+			self.size_in_bytes += key.as_ref().len() + node.size_in_bytes();
+
+			if let Some((r_key, r_node)) = self.lru.push(key, node) {
+				update_size_in_bytes(&mut self.size_in_bytes, &r_key, &r_node);
+			}
+
+			// Directly ensure that we respect the maximum size. By doing it directly here we
+			// ensure that the internal map of the [`LruCache`] doesn't grow too much.
+			while self.maximum_cache_size.exceeds(self.size_in_bytes) {
+				// This should always be `Some(_)`, otherwise something is wrong!
+				if let Some((key, node)) = self.lru.pop_lru() {
+					update_size_in_bytes(&mut self.size_in_bytes, &key, &node);
+				}
+			}
+		});
+	}
+
+	/// Reset the cache.
+	fn reset(&mut self) {
+		self.size_in_bytes = 0;
+		self.lru.clear();
+	}
+}
+
+/// The hash of [`ValueCacheKey`].
+#[derive(Eq, Clone, Copy)]
+pub struct ValueCacheKeyHash(u64);
+
+impl ValueCacheKeyHash {
+	pub fn from_hasher_and_storage_key(
+		mut hasher: impl std::hash::Hasher,
+		storage_key: &[u8],
+	) -> Self {
+		hasher.write(storage_key);
+
+		Self(hasher.finish())
+	}
+}
+
+impl PartialEq for ValueCacheKeyHash {
+	fn eq(&self, other: &Self) -> bool {
+		self.0 == other.0
+	}
+}
+
+impl std::hash::Hash for ValueCacheKeyHash {
+	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
+		state.write_u64(self.0);
+	}
+}
+
+impl nohash_hasher::IsEnabled for ValueCacheKeyHash {}
+
+/// A type that can only be constructed inside of this file.
+///
+/// It "requires" that the user has read the docs, to prevent misuse.
+#[derive(Eq, PartialEq)]
+pub(super) struct IReadTheDocumentation(());
+
+/// The key type that is being used to address a [`CachedValue`].
+///
+/// This type is implemented as `enum` to improve the performance when accessing the value cache.
+/// The problem is that we need to calculate the `hash` of [`Self`] in the worst case three times
+/// when trying to find a value in the value cache: first to look up the local cache, then the
+/// shared cache and, if we found it in the shared cache, a third time to insert it into the list
+/// of accessed values. To work around this, each variant stores the `hash` that identifies a
+/// unique combination of `storage_key` and `storage_root`.
+/// However, be aware that this `hash` can lead to collisions when there are two different
+/// `storage_key` and `storage_root` pairs that map to the same `hash`. This type also has the
+/// `Hash` variant. This variant should only be used for updating the lru for a key, because when
+/// using only the `Hash` variant to get a value from a hash map, it could happen that a wrong
+/// value is returned when there is another key in the same hash map that maps to the same `hash`.
+/// The [`PartialEq`] implementation is written in a way that when one of the two compared
+/// instances is the `Hash` variant, we will only compare the hashes. This ensures that we can use
+/// the `Hash` variant to bring values up in the lru.
+#[derive(Eq)]
+pub(super) enum ValueCacheKey<'a, H> {
+	/// Variant that stores the `storage_key` by value.
+	Value {
+		/// The storage root of the trie this key belongs to.
+		storage_root: H,
+		/// The key to access the value in the storage.
+		storage_key: Arc<[u8]>,
+		/// The hash that identifies this instance of `storage_root` and `storage_key`.
+		hash: ValueCacheKeyHash,
+	},
+	/// Variant that only references the `storage_key`.
+	Ref {
+		/// The storage root of the trie this key belongs to.
+		storage_root: H,
+		/// The key to access the value in the storage.
+		storage_key: &'a [u8],
+		/// The hash that identifies this instance of `storage_root` and `storage_key`.
+		hash: ValueCacheKeyHash,
+	},
+	/// Variant that only stores the hash that represents the `storage_root` and `storage_key`.
+	///
+	/// This should be used with caution, because it can lead to accessing the wrong value in a
+	/// hash map/set when there exist two different `storage_root`s and `storage_key`s that
+	/// map to the same `hash`.
+	Hash { hash: ValueCacheKeyHash, _i_read_the_documentation: IReadTheDocumentation },
+}
+
+impl<'a, H> ValueCacheKey<'a, H> {
+	/// Constructs [`Self::Value`].
+	pub fn new_value(storage_key: impl Into<Arc<[u8]>>, storage_root: H) -> Self
+	where
+		H: AsRef<[u8]>,
+	{
+		let storage_key = storage_key.into();
+		let hash = Self::hash_data(&storage_key, &storage_root);
+		Self::Value { storage_root, storage_key, hash }
+	}
+
+	/// Constructs [`Self::Ref`].
+	pub fn new_ref(storage_key: &'a [u8], storage_root: H) -> Self
+	where
+		H: AsRef<[u8]>,
+	{
+		let storage_key = storage_key.into();
+		let hash = Self::hash_data(storage_key, &storage_root);
+		Self::Ref { storage_root, storage_key, hash }
+	}
+
+	/// Returns a hasher prepared to build the final hash to identify [`Self`].
+	///
+	/// See [`Self::hash_data`] for building the hash directly.
+	pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone
+	where
+		H: AsRef<[u8]>,
+	{
+		let mut hasher = RANDOM_STATE.build_hasher();
+		hasher.write(storage_root.as_ref());
+		hasher
+	}
+
+	/// Hash the `key` and `storage_root` that identify [`Self`].
+	///
+	/// Returns a `u64` which represents the unique hash for the given inputs.
+	pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash
+	where
+		H: AsRef<[u8]>,
+	{
+		let hasher = Self::hash_partial_data(storage_root);
+
+		ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key)
+	}
+
+	/// Returns the `hash` that identifies the current instance.
+	pub fn get_hash(&self) -> ValueCacheKeyHash {
+		match self {
+			Self::Value { hash, .. } | Self::Ref { hash, .. } | Self::Hash { hash, .. } => *hash,
+		}
+	}
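// A sketch of how the two-step hashing above is meant to be used (not part of the diff;
// `sp_core::H256` stands in for the storage-root type and the function name is hypothetical):
fn hash_two_keys_for_one_root(root: &sp_core::H256) -> (ValueCacheKeyHash, ValueCacheKeyHash) {
	// The returned hasher has already consumed the `storage_root` bytes...
	let partial = ValueCacheKey::<sp_core::H256>::hash_partial_data(root);
	// ...so per lookup only the `storage_key` bytes still have to be written.
	let a = ValueCacheKeyHash::from_hasher_and_storage_key(partial.clone(), b"key-a");
	let b = ValueCacheKeyHash::from_hasher_and_storage_key(partial, b"key-b");
	(a, b)
}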
+	/// Returns the stored storage root.
+	pub fn storage_root(&self) -> Option<&H> {
+		match self {
+			Self::Value { storage_root, .. } | Self::Ref { storage_root, .. } =>
+				Some(storage_root),
+			Self::Hash { .. } => None,
+		}
+	}
+
+	/// Returns the stored storage key.
+	pub fn storage_key(&self) -> Option<&[u8]> {
+		match self {
+			Self::Ref { storage_key, .. } => Some(storage_key),
+			Self::Value { storage_key, .. } => Some(storage_key),
+			Self::Hash { .. } => None,
+		}
+	}
+}
+
+// Implement manually to ensure that the `Value` and `Hash` are treated equally.
+impl<H> std::hash::Hash for ValueCacheKey<'_, H> {
+	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
+		self.get_hash().hash(state)
+	}
+}
+
+impl<H> nohash_hasher::IsEnabled for ValueCacheKey<'_, H> {}
+
+// Implement manually to ensure that the `Value` and `Hash` are treated equally.
+impl<H: PartialEq> PartialEq for ValueCacheKey<'_, H> {
+	fn eq(&self, other: &Self) -> bool {
+		// First check if `self` or `other` is only the `Hash`.
+		// Then we only compare the `hash`. So, there could actually be some collision
+		// if two different storage roots and keys are mapping to the same key. See the
+		// [`ValueCacheKey`] docs for more information.
+		match (self, other) {
+			(Self::Hash { hash, .. }, Self::Hash { hash: other_hash, .. }) => hash == other_hash,
+			(Self::Hash { hash, .. }, _) => *hash == other.get_hash(),
+			(_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash,
+			// If both are not the `Hash` variant, we compare all the values.
+			_ =>
+				self.get_hash() == other.get_hash() &&
+					self.storage_root() == other.storage_root() &&
+					self.storage_key() == other.storage_key(),
+		}
+	}
+}
+
+/// The shared value cache.
+///
+/// The cache ensures that it stays in the configured size bounds.
+pub(super) struct SharedValueCache<H> {
+	/// The cached values, ordered by least recently used.
+	pub(super) lru: NoHashingLruCache<ValueCacheKey<'static, H>, CachedValue<H>>,
+	/// The size of [`Self::lru`] in bytes.
+	pub(super) size_in_bytes: usize,
+	/// The maximum cache size of [`Self::lru`].
+	maximum_cache_size: CacheSize,
+	/// All known storage keys that are stored in [`Self::lru`].
+	///
+	/// This is used to de-duplicate keys in memory that use the
+	/// same [`ValueCacheKey::storage_key`], but have a different
+	/// [`ValueCacheKey::storage_root`].
+	known_storage_keys: HashSet<Arc<[u8]>>,
+}
+
+impl<H: AsRef<[u8]> + Eq + std::hash::Hash + Clone + Copy> SharedValueCache<H> {
+	/// Create a new instance.
+	fn new(cache_size: CacheSize) -> Self {
+		Self {
+			lru: NoHashingLruCache::unbounded_with_hasher(Default::default()),
+			size_in_bytes: 0,
+			maximum_cache_size: cache_size,
+			known_storage_keys: Default::default(),
+		}
+	}
+
+	/// Get the [`CachedValue`] for `key`.
+	///
+	/// This doesn't change the least recently used order in the internal [`LruCache`].
+	pub fn get<'a>(&'a self, key: &ValueCacheKey<H>) -> Option<&'a CachedValue<H>> {
+		debug_assert!(
+			!matches!(key, ValueCacheKey::Hash { .. }),
+			"`get` can not be called with `Hash` variant as this may return the wrong value."
+		);
+
+		self.lru.peek(unsafe {
+			// SAFETY
+			//
+			// We need to convert the lifetime to make the compiler happy. However, as
+			// we only use the `key` to look up the value, this lifetime conversion is
+			// safe.
+			mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>(key)
+		})
+	}
+
+	/// Update the cache with the `added` values and the `accessed` values.
+	///
+	/// The `added` values are the ones that have been collected by doing operations on the trie
+	/// and now should be stored in the shared cache. The `accessed` values are only referenced by
+	/// the [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared
+	/// cache through [`Self::get`].
+	/// These `accessed` values are being put to the front of the internal [`LruCache`] like the
+	/// `added` ones.
+	///
+	/// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`]
+	/// is inside its bounds ([`Self::maximum_cache_size`]).
+	pub fn update(
+		&mut self,
+		added: impl IntoIterator<Item = (ValueCacheKey<'static, H>, CachedValue<H>)>,
+		accessed: impl IntoIterator<Item = ValueCacheKeyHash>,
+	) {
+		// The base size in memory per ([`ValueCacheKey`], [`CachedValue`]).
+		let base_size = mem::size_of::<ValueCacheKey<H>>() + mem::size_of::<CachedValue<H>>();
+		let known_keys_entry_size = mem::size_of::<Arc<[u8]>>();
+
+		let update_size_in_bytes =
+			|size_in_bytes: &mut usize, r_key: Arc<[u8]>, known_keys: &mut HashSet<Arc<[u8]>>| {
+				// If the `strong_count == 2`, it means this is the last instance of the key.
+				// One being `r_key` and the other being stored in `known_storage_keys`.
+				let last_instance = Arc::strong_count(&r_key) == 2;
+
+				let key_len = if last_instance {
+					known_keys.remove(&r_key);
+					r_key.len() + known_keys_entry_size
+				} else {
+					// The key is still in `keys`, because it is still used by another
+					// `ValueCacheKey`.
+					0
+				};
+
+				if let Some(new_size_in_bytes) = size_in_bytes.checked_sub(key_len + base_size) {
+					*size_in_bytes = new_size_in_bytes;
+				} else {
+					*size_in_bytes = 0;
+					tracing::error!(target: LOG_TARGET, "`SharedValueCache` underflow detected!");
+				}
+			};
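// A self-contained sketch of the `strong_count == 2` reasoning above (not part of the diff):
// the entry in `known_storage_keys` and the evicted lru entry are the two remaining `Arc`
// handles, so a count of two means no other cached value still shares this key.
fn arc_strong_count_sketch() {
	let known_keys_entry: Arc<[u8]> = Arc::from(&b"storage_key"[..]);
	// The one value in the lru that still uses the key.
	let in_lru = known_keys_entry.clone();
	// Evicting `in_lru` must therefore also drop the key from `known_storage_keys`.
	assert_eq!(Arc::strong_count(&in_lru), 2);
}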
+		accessed.into_iter().for_each(|key| {
+			// Access every value in the lru to put it to the front.
+			// As we are using the `Hash` variant here, it may lead to putting the wrong value
+			// at the top. However, the only consequence of this is that we may prune a recently
+			// used value too early.
+			self.lru.get(&ValueCacheKey::Hash {
+				hash: key,
+				_i_read_the_documentation: IReadTheDocumentation(()),
+			});
+		});
+
+		added.into_iter().for_each(|(key, value)| {
+			let (storage_root, storage_key, key_hash) = match key {
+				ValueCacheKey::Hash { .. } => {
+					// Ignore the hash variant and try the next.
+					tracing::error!(
+						target: LOG_TARGET,
+						"`SharedValueCache::update` was called with a key to add \
+						 that uses the `Hash` variant. This would lead to potential hash collision!",
+					);
+					return
+				},
+				ValueCacheKey::Ref { storage_key, storage_root, hash } =>
+					(storage_root, storage_key.into(), hash),
+				ValueCacheKey::Value { storage_root, storage_key, hash } =>
+					(storage_root, storage_key, hash),
+			};
+
+			let (size_update, storage_key) =
+				match self.known_storage_keys.entry(storage_key.clone()) {
+					SetEntry::Vacant(v) => {
+						let len = v.get().len();
+						v.insert();
+
+						// If the key was unknown, we need to also take its length and the size of
+						// the entry of `known_keys` into account.
+						(len + base_size + known_keys_entry_size, storage_key)
+					},
+					SetEntry::Occupied(o) => {
+						// Key is known
+						(base_size, o.get().clone())
+					},
+				};
+
+			self.size_in_bytes += size_update;
+
+			if let Some((r_key, _)) = self
+				.lru
+				.push(ValueCacheKey::Value { storage_key, storage_root, hash: key_hash }, value)
+			{
+				if let ValueCacheKey::Value { storage_key, .. } = r_key {
+					update_size_in_bytes(
+						&mut self.size_in_bytes,
+						storage_key,
+						&mut self.known_storage_keys,
+					);
+				}
+			}
+
+			// Directly ensure that we respect the maximum size. By doing it directly here we
+			// ensure that the internal map of the [`LruCache`] doesn't grow too much.
+			while self.maximum_cache_size.exceeds(self.size_in_bytes) {
+				// This should always be `Some(_)`, otherwise something is wrong!
+				if let Some((r_key, _)) = self.lru.pop_lru() {
+					if let ValueCacheKey::Value { storage_key, .. } = r_key {
+						update_size_in_bytes(
+							&mut self.size_in_bytes,
+							storage_key,
+							&mut self.known_storage_keys,
+						);
+					}
+				}
+			}
+		});
+	}
+
+	/// Reset the cache.
+	fn reset(&mut self) {
+		self.size_in_bytes = 0;
+		self.lru.clear();
+		self.known_storage_keys.clear();
+	}
+}
+
+/// The inner of [`SharedTrieCache`].
+pub(super) struct SharedTrieCacheInner<H: Hasher> {
+	node_cache: SharedNodeCache<H::Out>,
+	value_cache: SharedValueCache<H::Out>,
+}
+
+impl<H: Hasher> SharedTrieCacheInner<H> {
+	/// Returns a reference to the [`SharedValueCache`].
+	pub(super) fn value_cache(&self) -> &SharedValueCache<H::Out> {
+		&self.value_cache
+	}
+
+	/// Returns a mutable reference to the [`SharedValueCache`].
+	pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache<H::Out> {
+		&mut self.value_cache
+	}
+
+	/// Returns a reference to the [`SharedNodeCache`].
+	pub(super) fn node_cache(&self) -> &SharedNodeCache<H::Out> {
+		&self.node_cache
+	}
+
+	/// Returns a mutable reference to the [`SharedNodeCache`].
+	pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache<H::Out> {
+		&mut self.node_cache
+	}
+}
+
+/// The shared trie cache.
+///
+/// It should be instantiated once per node. It will hold the trie nodes and values of all
+/// operations to the state. To avoid using all available memory, it ensures that it stays in the
+/// bounds given via the [`CacheSize`] at startup.
+///
+/// The instance of this object can be shared between multiple threads.
+pub struct SharedTrieCache<H: Hasher> {
+	inner: Arc<RwLock<SharedTrieCacheInner<H>>>,
+}
+
+impl<H: Hasher> Clone for SharedTrieCache<H> {
+	fn clone(&self) -> Self {
+		Self { inner: self.inner.clone() }
+	}
+}
+
+impl<H: Hasher> SharedTrieCache<H> {
+	/// Create a new [`SharedTrieCache`].
+	pub fn new(cache_size: CacheSize) -> Self {
+		let (node_cache_size, value_cache_size) = match cache_size {
+			CacheSize::Maximum(max) => {
+				// Allocate 20% for the value cache.
+				let value_cache_size_in_bytes = (max as f32 * 0.20) as usize;
+
+				(
+					CacheSize::Maximum(max - value_cache_size_in_bytes),
+					CacheSize::Maximum(value_cache_size_in_bytes),
+				)
+			},
+			CacheSize::Unlimited => (CacheSize::Unlimited, CacheSize::Unlimited),
+		};
+
+		Self {
+			inner: Arc::new(RwLock::new(SharedTrieCacheInner {
+				node_cache: SharedNodeCache::new(node_cache_size),
+				value_cache: SharedValueCache::new(value_cache_size),
+			})),
+		}
+	}
+
+	/// Create a new [`LocalTrieCache`](super::LocalTrieCache) instance from this shared cache.
+	pub fn local_cache(&self) -> super::LocalTrieCache<H> {
+		super::LocalTrieCache {
+			shared: self.clone(),
+			node_cache: Default::default(),
+			value_cache: Default::default(),
+			shared_node_cache_access: Default::default(),
+			shared_value_cache_access: Default::default(),
+		}
+	}
+
+	/// Returns the used memory size of this cache in bytes.
+	pub fn used_memory_size(&self) -> usize {
+		let inner = self.inner.read();
+		let value_cache_size = inner.value_cache.size_in_bytes;
+		let node_cache_size = inner.node_cache.size_in_bytes;
+
+		node_cache_size + value_cache_size
+	}
+
+	/// Reset the node cache.
+	pub fn reset_node_cache(&self) {
+		self.inner.write().node_cache.reset();
+	}
+
+	/// Reset the value cache.
+	pub fn reset_value_cache(&self) {
+		self.inner.write().value_cache.reset();
+	}
+
+	/// Reset the entire cache.
+	pub fn reset(&self) {
+		self.reset_node_cache();
+		self.reset_value_cache();
+	}
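// A minimal usage sketch (not part of the diff): one shared cache per node, one local view per
// task. Assumes `sp_core::Blake2Hasher`; the byte budget is arbitrary.
fn shared_cache_usage_sketch() {
	let shared =
		SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize::Maximum(64 * 1024 * 1024));
	{
		// Hand a local view to a trie operation; per the tests above, its contents are
		// merged back into the shared cache when the local cache is dropped.
		let _local = shared.local_cache();
	}
	assert!(shared.used_memory_size() <= 64 * 1024 * 1024);
}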
+	/// Returns the read locked inner.
+	pub(super) fn read_lock_inner(&self) -> RwLockReadGuard<'_, SharedTrieCacheInner<H>> {
+		self.inner.read()
+	}
+
+	/// Returns the write locked inner.
+	pub(super) fn write_lock_inner(&self) -> RwLockWriteGuard<'_, SharedTrieCacheInner<H>> {
+		self.inner.write()
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use sp_core::H256 as Hash;
+
+	#[test]
+	fn shared_value_cache_works() {
+		let base_size =
+			mem::size_of::<ValueCacheKey<Hash>>() + mem::size_of::<CachedValue<Hash>>();
+		let arc_size = mem::size_of::<Arc<[u8]>>();
+
+		let mut cache = SharedValueCache::<Hash>::new(CacheSize::Maximum(
+			(base_size + arc_size + 10) * 10,
+		));
+
+		let key = vec![0; 10];
+
+		let root0 = Hash::repeat_byte(1);
+		let root1 = Hash::repeat_byte(2);
+
+		cache.update(
+			vec![
+				(ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
+				(ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
+			],
+			vec![],
+		);
+
+		// Ensure that the basics are working
+		assert_eq!(1, cache.known_storage_keys.len());
+		assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap()));
+		assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes);
+
+		// Just accessing a key should not change anything on the size and number of entries.
+		cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]);
+		assert_eq!(1, cache.known_storage_keys.len());
+		assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap()));
+		assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes);
+
+		// Add 9 other entries and this should move out the key for `root1`.
+		cache.update(
+			(1..10)
+				.map(|i| vec![i; 10])
+				.map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)),
+			vec![],
+		);
+
+		assert_eq!(10, cache.known_storage_keys.len());
+		assert_eq!(2, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap()));
+		assert_eq!((base_size + key.len() + arc_size) * 10, cache.size_in_bytes);
+		assert!(matches!(
+			cache.get(&ValueCacheKey::new_ref(&key, root0)).unwrap(),
+			CachedValue::<Hash>::NonExisting
+		));
+		assert!(cache.get(&ValueCacheKey::new_ref(&key, root1)).is_none());
+
+		cache.update(
+			vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)],
+			vec![],
+		);
+
+		assert!(cache.known_storage_keys.get(&key[..]).is_none());
+	}
+
+	#[test]
+	fn value_cache_key_eq_works() {
+		let storage_key = &b"something"[..];
+		let storage_key2 = &b"something2"[..];
+		let storage_root = Hash::random();
+
+		let value = ValueCacheKey::new_value(storage_key, storage_root);
+		// Ref gets the same hash, but a different storage key
+		let ref_ =
+			ValueCacheKey::Ref { storage_root, storage_key: storage_key2, hash: value.get_hash() };
+		let hash = ValueCacheKey::Hash {
+			hash: value.get_hash(),
+			_i_read_the_documentation: IReadTheDocumentation(()),
+		};
+
+		// Ensure that the hash variant is equal to `value`, `ref_` and itself.
+		assert!(hash == value);
+		assert!(value == hash);
+		assert!(hash == ref_);
+		assert!(ref_ == hash);
+		assert!(hash == hash);
+
+		// But when we compare `value` and `ref_` the different storage key is detected.
+		assert!(value != ref_);
+		assert!(ref_ != value);
+	}
+}
diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs
index e0b3642b6db76..a781d408e994f 100644
--- a/primitives/trie/src/error.rs
+++ b/primitives/trie/src/error.rs
@@ -15,18 +15,33 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-/// Error for trie node decoding.
+use sp_std::{boxed::Box, vec::Vec};
+
+/// Error type used for trie related errors.
 #[derive(Debug, PartialEq, Eq, Clone)]
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
-pub enum Error {
+pub enum Error<H> {
 	#[cfg_attr(feature = "std", error("Bad format"))]
 	BadFormat,
 	#[cfg_attr(feature = "std", error("Decoding failed: {0}"))]
 	Decode(#[cfg_attr(feature = "std", source)] codec::Error),
+	#[cfg_attr(
+		feature = "std",
+		error("Recorded key ({0:x?}) access with value as found={1}, but could not confirm with trie.")
+	)]
+	InvalidRecording(Vec<u8>, bool),
+	#[cfg_attr(feature = "std", error("Trie error: {0:?}"))]
+	TrieError(Box<trie_db::TrieError<H, Self>>),
 }
 
-impl From<codec::Error> for Error {
+impl<H> From<codec::Error> for Error<H> {
 	fn from(x: codec::Error) -> Self {
 		Error::Decode(x)
 	}
 }
+
+impl<H> From<Box<trie_db::TrieError<H, Self>>> for Error<H> {
+	fn from(x: Box<trie_db::TrieError<H, Self>>) -> Self {
+		Error::TrieError(x)
+	}
+}
diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs
index 1dca617acf912..fafa2a2891ce4 100644
--- a/primitives/trie/src/lib.rs
+++ b/primitives/trie/src/lib.rs
@@ -19,9 +19,13 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+#[cfg(feature = "std")]
+pub mod cache;
 mod error;
 mod node_codec;
 mod node_header;
+#[cfg(feature = "std")]
+pub mod recorder;
 mod storage_proof;
 mod trie_codec;
 mod trie_stream;
@@ -46,8 +50,8 @@ use trie_db::proof::{generate_proof, verify_proof};
 pub use trie_db::{
 	nibble_ops,
 	node::{NodePlan, ValuePlan},
-	CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator,
-	TrieLayout, TrieMut,
+	CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator,
+	TrieDBKeyIterator, TrieLayout, TrieMut, TrieRecorder,
 };
 /// The Substrate format implementation of `TrieStream`.
 pub use trie_stream::TrieStream;
@@ -167,11 +171,15 @@ pub type MemoryDB<H> = memory_db::MemoryDB<H, memory_db::HashKey<H>, trie_db::DBValue>;
 pub type GenericMemoryDB<H, KF> = memory_db::MemoryDB<H, KF, trie_db::DBValue>;
 
 /// Persistent trie database read-access interface for a given hasher.
-pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>;
+pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>;
+/// Builder for creating a [`TrieDB`].
+pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>;
 /// Persistent trie database write-access interface for a given hasher.
 pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>;
+/// Builder for creating a [`TrieDBMut`].
+pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>;
 /// Querying interface, as in `trie_db` but less generic.
-pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>;
+pub type Lookup<'a, 'cache, L, Q> = trie_db::Lookup<'a, 'cache, L, Q>;
 /// Hash type for a trie layout.
 pub type TrieHash<L> = <<L as TrieLayout>::Hash as Hasher>::Out;
 /// This module is for non generic definition of trie type.
@@ -180,18 +188,23 @@ pub mod trie_types {
 	use super::*;
 
 	/// Persistent trie database read-access interface for a given hasher.
+	///
 	/// Read only V1 and V0 are compatible, thus we always use V1.
-	pub type TrieDB<'a, H> = super::TrieDB<'a, LayoutV1<H>>;
+	pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1<H>>;
+	/// Builder for creating a [`TrieDB`].
+	pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1<H>>;
 	/// Persistent trie database write-access interface for a given hasher.
 	pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0<H>>;
+	/// Builder for creating a [`TrieDBMutV0`].
+	pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0<H>>;
 	/// Persistent trie database write-access interface for a given hasher.
 	pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1<H>>;
+	/// Builder for creating a [`TrieDBMutV1`].
+	pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1<H>>;
 	/// Querying interface, as in `trie_db` but less generic.
-	pub type LookupV0<'a, H, Q> = trie_db::Lookup<'a, LayoutV0<H>, Q>;
-	/// Querying interface, as in `trie_db` but less generic.
-	pub type LookupV1<'a, H, Q> = trie_db::Lookup<'a, LayoutV1<H>, Q>;
+	pub type Lookup<'a, 'cache, H, Q> = trie_db::Lookup<'a, 'cache, LayoutV1<H>, Q>;
 	/// As in `trie_db`, but less generic, error type for the crate.
-	pub type TrieError<H> = trie_db::TrieError<H, super::Error>;
+	pub type TrieError<H> = trie_db::TrieError<H, super::Error<H>>;
 }
 
 /// Create a proof for a subset of keys in a trie.
@@ -213,9 +226,7 @@ where
 	K: 'a + AsRef<[u8]>,
 	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
 {
-	// Can use default layout (read only).
-	let trie = TrieDB::<L>::new(db, &root)?;
-	generate_proof(&trie, keys)
+	generate_proof::<_, L, _, _>(db, &root, keys)
 }
 
 /// Verify a set of key-value pairs against a trie root and a proof.
@@ -245,6 +256,8 @@ pub fn delta_trie_root<L: TrieConfiguration, I, A, B, DB, V>(
 	db: &mut DB,
 	mut root: TrieHash<L>,
 	delta: I,
+	recorder: Option<&mut dyn trie_db::TrieRecorder<TrieHash<L>>>,
+	cache: Option<&mut dyn TrieCache<L::Codec>>,
 ) -> Result<TrieHash<L>, Box<TrieError<L>>>
 where
 	I: IntoIterator<Item = (A, B)>,
@@ -254,7 +267,10 @@ where
 	DB: hash_db::HashDB<L::Hash, trie_db::DBValue>,
 {
 	{
-		let mut trie = TrieDBMut::<L>::from_existing(db, &mut root)?;
+		let mut trie = TrieDBMutBuilder::<L>::from_existing(db, &mut root)
+			.with_optional_cache(cache)
+			.with_optional_recorder(recorder)
+			.build();
 
 		let mut delta = delta.into_iter().collect::<Vec<_>>();
 		delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow()));
@@ -271,33 +287,32 @@ where
 }
 
 /// Read a value from the trie.
-pub fn read_trie_value<L, DB>(
+pub fn read_trie_value<L: TrieLayout, DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>>(
 	db: &DB,
 	root: &TrieHash<L>,
 	key: &[u8],
-) -> Result<Option<Vec<u8>>, Box<TrieError<L>>>
-where
-	L: TrieConfiguration,
-	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
-{
-	TrieDB::<L>::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))
+	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>>>,
+	cache: Option<&mut dyn TrieCache<L::Codec>>,
+) -> Result<Option<Vec<u8>>, Box<TrieError<L>>> {
+	TrieDBBuilder::<L>::new(db, root)
+		.with_optional_cache(cache)
+		.with_optional_recorder(recorder)
+		.build()
+		.get(key)
 }
 
 /// Read a value from the trie with given Query.
-pub fn read_trie_value_with<L, Q, DB>(
+pub fn read_trie_value_with<
+	L: TrieLayout,
+	Q: Query<L::Hash, Item = DBValue>,
+	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
+>(
 	db: &DB,
 	root: &TrieHash<L>,
 	key: &[u8],
 	query: Q,
-) -> Result<Option<Vec<u8>>, Box<TrieError<L>>>
-where
-	L: TrieConfiguration,
-	Q: Query<L::Hash, Item = DBValue>,
-	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
-{
-	TrieDB::<L>::new(&*db, root)?
-		.get_with(key, query)
-		.map(|x| x.map(|val| val.to_vec()))
+) -> Result<Option<Vec<u8>>, Box<TrieError<L>>> {
+	TrieDBBuilder::<L>::new(db, root).build().get_with(key, query)
 }
 
 /// Determine the empty trie root.
@@ -328,6 +343,8 @@ pub fn child_delta_trie_root<L: TrieConfiguration, I, A, B, DB, RD, V>(
 	db: &mut DB,
 	root_data: RD,
 	delta: I,
+	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>>>,
+	cache: Option<&mut dyn TrieCache<L::Codec>>,
 ) -> Result<<L::Hash as Hasher>::Out, Box<TrieError<L>>>
 where
 	I: IntoIterator<Item = (A, B)>,
@@ -341,46 +358,49 @@ where
 	// root is fetched from DB, not writable by runtime, so it's always valid.
 	root.as_mut().copy_from_slice(root_data.as_ref());
 
-	let mut db = KeySpacedDBMut::new(&mut *db, keyspace);
-	delta_trie_root::<L, _, _, _, _, _>(&mut db, root, delta)
+	let mut db = KeySpacedDBMut::new(db, keyspace);
+	delta_trie_root::<L, _, _, _, _, _>(&mut db, root, delta, recorder, cache)
 }
 
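// A caller-side sketch of the new optional recorder/cache parameters (not part of the diff;
// assumes `LayoutV1<sp_core::Blake2Hasher>` and this crate's in-memory backend):
fn read_and_prove(
	db: &MemoryDB<sp_core::Blake2Hasher>,
	root: &TrieHash<LayoutV1<sp_core::Blake2Hasher>>,
) -> StorageProof {
	let recorder = crate::recorder::Recorder::<sp_core::Blake2Hasher>::default();
	{
		let mut trie_recorder = recorder.as_trie_recorder();
		// The two parameters are independent; passing `None` simply skips caching here.
		let _ = read_trie_value::<LayoutV1<sp_core::Blake2Hasher>, _>(
			db,
			root,
			b"key",
			Some(&mut trie_recorder),
			None,
		);
	}
	// Every node touched by the read is now part of the proof.
	recorder.drain_storage_proof()
}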
-/// Record all keys for a given root.
-pub fn record_all_keys<L: TrieConfiguration, DB>(
+/// Read a value from the child trie.
+pub fn read_child_trie_value<L: TrieConfiguration, DB>(
+	keyspace: &[u8],
 	db: &DB,
 	root: &TrieHash<L>,
-	recorder: &mut Recorder<TrieHash<L>>,
-) -> Result<(), Box<TrieError<L>>>
+	key: &[u8],
+	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>>>,
+	cache: Option<&mut dyn TrieCache<L::Codec>>,
+) -> Result<Option<Vec<u8>>, Box<TrieError<L>>>
 where
 	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
 {
-	let trie = TrieDB::<L>::new(&*db, root)?;
-	let iter = trie.iter()?;
-
-	for x in iter {
-		let (key, _) = x?;
-
-		// there's currently no API like iter_with()
-		// => use iter to enumerate all keys AND lookup each
-		// key using get_with
-		trie.get_with(&key, &mut *recorder)?;
-	}
-
-	Ok(())
+	let db = KeySpacedDB::new(db, keyspace);
+	TrieDBBuilder::<L>::new(&db, &root)
+		.with_optional_recorder(recorder)
+		.with_optional_cache(cache)
+		.build()
+		.get(key)
+		.map(|x| x.map(|val| val.to_vec()))
 }
 
-/// Read a value from the child trie.
-pub fn read_child_trie_value<L: TrieConfiguration, DB>(
+/// Read a hash from the child trie.
+pub fn read_child_trie_hash<L: TrieConfiguration, DB>(
 	keyspace: &[u8],
 	db: &DB,
 	root: &TrieHash<L>,
 	key: &[u8],
-) -> Result<Option<Vec<u8>>, Box<TrieError<L>>>
+	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>>>,
+	cache: Option<&mut dyn TrieCache<L::Codec>>,
+) -> Result<Option<TrieHash<L>>, Box<TrieError<L>>>
 where
 	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
 {
-	let db = KeySpacedDB::new(&*db, keyspace);
-	TrieDB::<L>::new(&db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))
+	let db = KeySpacedDB::new(db, keyspace);
+	TrieDBBuilder::<L>::new(&db, &root)
+		.with_optional_recorder(recorder)
+		.with_optional_cache(cache)
+		.build()
+		.get_hash(key)
 }
 
 /// Read a value from the child trie with given query.
@@ -400,21 +420,22 @@ where
 	// root is fetched from DB, not writable by runtime, so it's always valid.
 	root.as_mut().copy_from_slice(root_slice);
 
-	let db = KeySpacedDB::new(&*db, keyspace);
-	TrieDB::<L>::new(&db, &root)?
+	let db = KeySpacedDB::new(db, keyspace);
+	TrieDBBuilder::<L>::new(&db, &root)
+		.build()
 		.get_with(key, query)
 		.map(|x| x.map(|val| val.to_vec()))
 }
 
 /// `HashDB` implementation that appends an encoded prefix (unique id bytes) in addition to the
 /// prefix of every key value.
-pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData<H>);
+pub struct KeySpacedDB<'a, DB: ?Sized, H>(&'a DB, &'a [u8], PhantomData<H>);
 
 /// `HashDBMut` implementation that appends an encoded prefix (unique id bytes) in addition to the
 /// prefix of every key value.
 ///
 /// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`].
-pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData<H>);
+pub struct KeySpacedDBMut<'a, DB: ?Sized, H>(&'a mut DB, &'a [u8], PhantomData<H>);
 
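// A sketch of what the keyspace wrapping amounts to (not part of the diff; values are
// illustrative and the exact byte layout is an assumption): the wrappers never change any
// hashes, they only extend the database prefix under which a node is stored.
fn keyspace_prefix_sketch() {
	let keyspace: &[u8] = b"child_a";
	let prefix: hash_db::Prefix = (b"node_prefix", None);
	// `keyspace_as_prefix_alloc` (below) merges the keyspace into the prefix bytes and passes
	// the padding nibble through unchanged.
	let (merged, nibble) = keyspace_as_prefix_alloc(keyspace, prefix);
	assert_eq!(merged, b"child_anode_prefix".to_vec());
	assert_eq!(nibble, None);
}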
 /// Utility function used to merge some byte data (keyspace) and `prefix` data
 /// before calling key value database primitives.
@@ -425,20 +446,14 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec<u8>, Option<u8>) {
 	(result, prefix.1)
 }
 
-impl<'a, DB, H> KeySpacedDB<'a, DB, H>
-where
-	H: Hasher,
-{
+impl<'a, DB: ?Sized, H> KeySpacedDB<'a, DB, H> {
 	/// instantiate new keyspaced db
 	pub fn new(db: &'a DB, ks: &'a [u8]) -> Self {
 		KeySpacedDB(db, ks, PhantomData)
 	}
 }
 
-impl<'a, DB, H> KeySpacedDBMut<'a, DB, H>
-where
-	H: Hasher,
-{
+impl<'a, DB: ?Sized, H> KeySpacedDBMut<'a, DB, H> {
 	/// instantiate new keyspaced db
 	pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self {
 		KeySpacedDBMut(db, ks, PhantomData)
@@ -447,7 +462,7 @@ where
 
 impl<'a, DB, H, T> hash_db::HashDBRef<H, T> for KeySpacedDB<'a, DB, H>
 where
-	DB: hash_db::HashDBRef<H, T>,
+	DB: hash_db::HashDBRef<H, T> + ?Sized,
 	H: Hasher,
 	T: From<&'static [u8]>,
 {
@@ -501,7 +516,7 @@ where
 	T: Default + PartialEq<T> + for<'b> From<&'b [u8]> + Clone + Send + Sync,
 {
 	fn as_hash_db(&self) -> &dyn hash_db::HashDB<H, T> {
-		&*self
+		self
 	}
 
 	fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB<H, T> + 'b) {
@@ -550,7 +565,7 @@ mod tests {
 		let persistent = {
 			let mut memdb = MemoryDBMeta::default();
 			let mut root = Default::default();
-			let mut t = TrieDBMut::<T>::new(&mut memdb, &mut root);
+			let mut t = TrieDBMutBuilder::<T>::new(&mut memdb, &mut root).build();
 			for (x, y) in input.iter().rev() {
 				t.insert(x, y).unwrap();
 			}
@@ -564,13 +579,13 @@ mod tests {
 		let mut memdb = MemoryDBMeta::default();
 		let mut root = Default::default();
 		{
-			let mut t = TrieDBMut::<T>::new(&mut memdb, &mut root);
+			let mut t = TrieDBMutBuilder::<T>::new(&mut memdb, &mut root).build();
 			for (x, y) in input.clone() {
 				t.insert(x, y).unwrap();
 			}
 		}
 		{
-			let t = TrieDB::<T>::new(&memdb, &root).unwrap();
+			let t = TrieDBBuilder::<T>::new(&memdb, &root).build();
 			assert_eq!(
 				input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::<Vec<_>>(),
 				t.iter()
@@ -592,7 +607,7 @@ mod tests {
 	fn default_trie_root() {
 		let mut db = MemoryDB::default();
 		let mut root = TrieHash::<LayoutV1>::default();
-		let mut empty = TrieDBMut::<LayoutV1>::new(&mut db, &mut root);
+		let mut empty = TrieDBMutBuilder::<LayoutV1>::new(&mut db, &mut root).build();
 		empty.commit();
 		let root1 = empty.root().as_ref().to_vec();
 		let root2: Vec<u8> = LayoutV1::trie_root::<_, Vec<u8>, Vec<u8>>(std::iter::empty())
@@ -695,15 +710,12 @@ mod tests {
 		check_input(&input);
 	}
 
-	fn populate_trie<'db, T>(
+	fn populate_trie<'db, T: TrieConfiguration>(
 		db: &'db mut dyn HashDB<T::Hash, DBValue>,
 		root: &'db mut TrieHash<T>,
 		v: &[(Vec<u8>, Vec<u8>)],
-	) -> TrieDBMut<'db, T>
-	where
-		T: TrieConfiguration,
-	{
-		let mut t = TrieDBMut::<T>::new(db, root);
+	) -> TrieDBMut<'db, T> {
+		let mut t = TrieDBMutBuilder::<T>::new(db, root).build();
 		for i in 0..v.len() {
 			let key: &[u8] = &v[i].0;
 			let val: &[u8] = &v[i].1;
@@ -841,7 +853,7 @@ mod tests {
 		let mut root = Default::default();
 		let _ = populate_trie::<Layout>(&mut mdb, &mut root, &pairs);
 
-		let trie = TrieDB::<Layout>::new(&mdb, &root).unwrap();
+		let trie = TrieDBBuilder::<Layout>::new(&mdb, &root).build();
 
 		let iter = trie.iter().unwrap();
 		let mut iter_pairs = Vec::new();
@@ -954,12 +966,16 @@ mod tests {
 			&mut proof_db.clone(),
 			storage_root,
 			valid_delta,
+			None,
+			None,
 		)
 		.unwrap();
 		let second_storage_root = delta_trie_root::<Layout, _, _, _, _, _>(
 			&mut proof_db.clone(),
 			storage_root,
 			invalid_delta,
+			None,
+			None,
 		)
 		.unwrap();
diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs
index bd0ba27483e66..4b3e69adb7041 100644
--- a/primitives/trie/src/node_codec.rs
+++ b/primitives/trie/src/node_codec.rs
@@ -25,7 +25,7 @@ use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec};
 use trie_db::{
 	nibble_ops,
 	node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan},
-	ChildReference, NodeCodec as NodeCodecT, Partial,
+	ChildReference, NodeCodec as NodeCodecT,
 };
 
 /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while
@@ -85,7 +85,7 @@ where
 	H: Hasher,
 {
 	const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER);
-	type Error = Error;
+	type Error = Error<H::Out>;
 	type HashOut = H::Out;
 
 	fn hashed_null_node() -> <H as Hasher>::Out {
@@ -185,19 +185,19 @@ where
 		&[trie_constants::EMPTY_TRIE]
 	}
 
-	fn leaf_node(partial: Partial, value: Value) -> Vec<u8> {
+	fn leaf_node(partial: impl Iterator<Item = u8>, number_nibble: usize, value: Value) -> Vec<u8> {
 		let contains_hash = matches!(&value, Value::Node(..));
 		let mut output = if contains_hash {
-			partial_encode(partial, NodeKind::HashedValueLeaf)
+			partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf)
 		} else {
-			partial_encode(partial, NodeKind::Leaf)
+			partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf)
 		};
 		match value {
 			Value::Inline(value) => {
 				Compact(value.len() as u32).encode_to(&mut output);
 				output.extend_from_slice(value);
 			},
-			Value::Node(hash, _) => {
+			Value::Node(hash) => {
 				debug_assert!(hash.len() == H::LENGTH);
 				output.extend_from_slice(hash);
 			},
@@ -244,7 +244,7 @@ where
 				Compact(value.len() as u32).encode_to(&mut output);
 				output.extend_from_slice(value);
 			},
-			Some(Value::Node(hash, _)) => {
+			Some(Value::Node(hash)) => {
 				debug_assert!(hash.len() == H::LENGTH);
 				output.extend_from_slice(hash);
 			},
@@ -295,31 +295,6 @@ fn partial_from_iterator_encode<I: Iterator<Item = u8>>(
 	output
 }
 
-/// Encode and allocate node type header (type and size), and partial value.
-/// Same as `partial_from_iterator_encode` but uses non encoded `Partial` as input.
-fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec<u8> {
-	let number_nibble_encoded = (partial.0).0 as usize;
-	let nibble_count = partial.1.len() * nibble_ops::NIBBLE_PER_BYTE + number_nibble_encoded;
-
-	let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
-
-	let mut output = Vec::with_capacity(4 + partial.1.len());
-	match node_kind {
-		NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output),
-		NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
-		NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
-		NodeKind::HashedValueLeaf =>
-			NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
-		NodeKind::HashedValueBranch =>
-			NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
-	};
-	if number_nibble_encoded > 0 {
-		output.push(nibble_ops::pad_right((partial.0).1));
-	}
-	output.extend_from_slice(partial.1);
-	output
-}
-
 const BITMAP_LENGTH: usize = 2;
 
 /// Radix 16 trie, bitmap encoding implementation,
@@ -329,7 +304,7 @@ const BITMAP_LENGTH: usize = 2;
 pub(crate) struct Bitmap(u16);
 
 impl Bitmap {
-	pub fn decode(mut data: &[u8]) -> Result<Self, Error> {
+	pub fn decode(mut data: &[u8]) -> Result<Self, codec::Error> {
 		Ok(Bitmap(u16::decode(&mut data)?))
 	}
diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs
new file mode 100644
index 0000000000000..5599ad1c36904
--- /dev/null
+++ b/primitives/trie/src/recorder.rs
@@ -0,0 +1,284 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Trie recorder
+//!
+//! Provides an implementation of the [`TrieRecorder`](trie_db::TrieRecorder) trait. It can be
+//! used to record storage accesses to the state to generate a [`StorageProof`].
+
+use crate::{NodeCodec, StorageProof};
+use codec::Encode;
+use hash_db::Hasher;
+use parking_lot::Mutex;
+use std::{
+	collections::HashMap,
+	marker::PhantomData,
+	mem,
+	ops::DerefMut,
+	sync::{
+		atomic::{AtomicUsize, Ordering},
+		Arc,
+	},
+};
+use trie_db::{RecordedForKey, TrieAccess};
+
+const LOG_TARGET: &str = "trie-recorder";
+
+/// The internals of [`Recorder`].
+struct RecorderInner<H> {
+	/// The keys for which we have recorded the trie nodes, and whether we recorded up to the
+	/// value for them.
+	recorded_keys: HashMap<Vec<u8>, RecordedForKey>,
+	/// The encoded nodes we accessed while recording.
+	accessed_nodes: HashMap<H, Vec<u8>>,
+}
+
+impl<H> Default for RecorderInner<H> {
+	fn default() -> Self {
+		Self { recorded_keys: Default::default(), accessed_nodes: Default::default() }
+	}
+}
+
+/// The trie recorder.
+///
+/// It can be used to record accesses to the trie and then to convert them into a
+/// [`StorageProof`].
+pub struct Recorder<H: Hasher> {
+	inner: Arc<Mutex<RecorderInner<H::Out>>>,
+	/// The estimated encoded size of the storage proof this recorder will produce.
+	///
+	/// We store this in an atomic to be able to fetch the value while `inner` may be locked.
+	encoded_size_estimation: Arc<AtomicUsize>,
+}
+
+impl<H: Hasher> Default for Recorder<H> {
+	fn default() -> Self {
+		Self { inner: Default::default(), encoded_size_estimation: Arc::new(0.into()) }
+	}
+}
+
+impl<H: Hasher> Clone for Recorder<H> {
+	fn clone(&self) -> Self {
+		Self {
+			inner: self.inner.clone(),
+			encoded_size_estimation: self.encoded_size_estimation.clone(),
+		}
+	}
+}
+
+impl<H: Hasher> Recorder<H> {
+	/// Returns the recorder as a [`TrieRecorder`](trie_db::TrieRecorder) compatible type.
+	pub fn as_trie_recorder(&self) -> impl trie_db::TrieRecorder<H::Out> + '_ {
+		TrieRecorder::<H, _> {
+			inner: self.inner.lock(),
+			encoded_size_estimation: self.encoded_size_estimation.clone(),
+			_phantom: PhantomData,
+		}
+	}
+
+	/// Drain the recording into a [`StorageProof`].
+	///
+	/// While a recorder can be cloned, all clones share the same internal state. After calling
+	/// this function, all other instances will have their internal state reset as well.
+	///
+	/// If you don't want to drain the recorded state, use [`Self::to_storage_proof`].
+	///
+	/// Returns the [`StorageProof`].
+	pub fn drain_storage_proof(self) -> StorageProof {
+		let mut recorder = mem::take(&mut *self.inner.lock());
+		StorageProof::new(recorder.accessed_nodes.drain().map(|(_, v)| v))
+	}
+
+	/// Convert the recording to a [`StorageProof`].
+	///
+	/// In contrast to [`Self::drain_storage_proof`], this doesn't consume and doesn't clear the
+	/// recordings.
+	///
+	/// Returns the [`StorageProof`].
+	pub fn to_storage_proof(&self) -> StorageProof {
+		let recorder = self.inner.lock();
+		StorageProof::new(recorder.accessed_nodes.iter().map(|(_, v)| v.clone()))
+	}
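// A small sketch of the shared-state semantics described above (not part of the diff;
// assumes `sp_core::Blake2Hasher`): clones observe, and are drained with, the same recording.
fn recorder_clone_sketch() {
	let recorder = Recorder::<sp_core::Blake2Hasher>::default();
	let clone = recorder.clone();
	// Both handles report the same estimation at any point in time...
	assert_eq!(recorder.estimate_encoded_size(), clone.estimate_encoded_size());
	// ...and draining through one handle empties the recording for the other as well.
	let _proof = recorder.drain_storage_proof();
	assert!(clone.to_storage_proof().is_empty());
}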
+	/// Returns the estimated encoded size of the proof.
+	///
+	/// The estimation is based on all the nodes that were accessed so far while
+	/// accessing the trie.
+	pub fn estimate_encoded_size(&self) -> usize {
+		self.encoded_size_estimation.load(Ordering::Relaxed)
+	}
+
+	/// Reset the state.
+	///
+	/// This discards all recorded data.
+	pub fn reset(&self) {
+		mem::take(&mut *self.inner.lock());
+		self.encoded_size_estimation.store(0, Ordering::Relaxed);
+	}
+}
+
+/// The [`TrieRecorder`](trie_db::TrieRecorder) implementation.
+struct TrieRecorder<H: Hasher, I> {
+	inner: I,
+	encoded_size_estimation: Arc<AtomicUsize>,
+	_phantom: PhantomData<H>,
+}
+
+impl<H: Hasher, I: DerefMut<Target = RecorderInner<H::Out>>> trie_db::TrieRecorder<H::Out>
+	for TrieRecorder<H, I>
+{
+	fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) {
+		let mut encoded_size_update = 0;
+
+		match access {
+			TrieAccess::NodeOwned { hash, node_owned } => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					hash = ?hash,
+					"Recording node",
+				);
+
+				self.inner.accessed_nodes.entry(hash).or_insert_with(|| {
+					let node = node_owned.to_encoded::<NodeCodec<H>>();
+
+					encoded_size_update += node.encoded_size();
+
+					node
+				});
+			},
+			TrieAccess::EncodedNode { hash, encoded_node } => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					hash = ?hash,
+					"Recording node",
+				);
+
+				self.inner.accessed_nodes.entry(hash).or_insert_with(|| {
+					let node = encoded_node.into_owned();
+
+					encoded_size_update += node.encoded_size();
+
+					node
+				});
+			},
+			TrieAccess::Value { hash, value, full_key } => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					hash = ?hash,
+					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
+					"Recording value",
+				);
+
+				self.inner.accessed_nodes.entry(hash).or_insert_with(|| {
+					let value = value.into_owned();
+
+					encoded_size_update += value.encoded_size();
+
+					value
+				});
+
+				self.inner
+					.recorded_keys
+					.entry(full_key.to_vec())
+					.and_modify(|e| *e = RecordedForKey::Value)
+					.or_insert(RecordedForKey::Value);
+			},
+			TrieAccess::Hash { full_key } => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
+					"Recorded hash access for key",
+				);
+
+				// We don't need to update the `encoded_size_update` as the hash was already
+				// accounted for by the recorded node that holds the hash.
+				self.inner
+					.recorded_keys
+					.entry(full_key.to_vec())
+					.or_insert(RecordedForKey::Hash);
+			},
+			TrieAccess::NonExisting { full_key } => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
+					"Recorded non-existing value access for key",
+				);
+
+				// Non-existing access means we recorded all trie nodes up to the value.
+				// Not the actual value, as it doesn't exist, but all trie nodes needed to know
+				// that the value doesn't exist in the trie.
+				self.inner
+					.recorded_keys
+					.entry(full_key.to_vec())
+					.and_modify(|e| *e = RecordedForKey::Value)
+					.or_insert(RecordedForKey::Value);
+			},
+		};
+
+		self.encoded_size_estimation.fetch_add(encoded_size_update, Ordering::Relaxed);
+	}
+
+	fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey {
+		self.inner.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};
+
+	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
+	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
+	type Recorder = super::Recorder<sp_core::Blake2Hasher>;
+
+	const TEST_DATA: &[(&[u8], &[u8])] =
+		&[(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3"), (b"key4", b"val4")];
+
+	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
+		let mut db = MemoryDB::default();
+		let mut root = Default::default();
+
+		{
+			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
+			for (k, v) in TEST_DATA {
+				trie.insert(k, v).expect("Inserts data");
+			}
+		}
+
+		(db, root)
+	}
+
+	#[test]
+	fn recorder_works() {
+		let (db, root) = create_trie();
+
+		let recorder = Recorder::default();
+
+		{
+			let mut trie_recorder = recorder.as_trie_recorder();
+			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
+				.with_recorder(&mut trie_recorder)
+				.build();
+			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
+		}
+
+		let storage_proof = recorder.drain_storage_proof();
+		let memory_db: MemoryDB = storage_proof.into_memory_db();
+
+		// Check that we recorded the required data
+		let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();
+		assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
+	}
+}
diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs
index f6139584dbbad..8fdb04ee20ed0 100644
--- a/primitives/trie/src/storage_proof.rs
+++ b/primitives/trie/src/storage_proof.rs
@@ -88,7 +88,7 @@ impl StorageProof {
 	pub fn into_compact_proof<H: Hasher>(
 		self,
 		root: H::Out,
-	) -> Result<CompactProof, crate::CompactProofError<LayoutV1<H>>> {
+	) -> Result<CompactProof, crate::CompactProofError<H::Out, crate::Error<H::Out>>> {
 		crate::encode_compact::<LayoutV1<H>>(self, root)
 	}
 
@@ -130,7 +130,7 @@ impl CompactProof {
 	pub fn to_storage_proof<H: Hasher>(
 		&self,
 		expected_root: Option<&H::Out>,
-	) -> Result<(StorageProof, H::Out), crate::CompactProofError<LayoutV1<H>>> {
+	) -> Result<(StorageProof, H::Out), crate::CompactProofError<H::Out, crate::Error<H::Out>>> {
 		let mut db = crate::MemoryDB::<H>::new(&[]);
 		let root = crate::decode_compact::<LayoutV1<H>, _, _>(
 			&mut db,
@@ -157,7 +157,8 @@ impl CompactProof {
 	pub fn to_memory_db<H: Hasher>(
 		&self,
 		expected_root: Option<&H::Out>,
-	) -> Result<(crate::MemoryDB<H>, H::Out), crate::CompactProofError<LayoutV1<H>>> {
+	) -> Result<(crate::MemoryDB<H>, H::Out), crate::CompactProofError<H::Out, crate::Error<H::Out>>>
+	{
 		let mut db = crate::MemoryDB::<H>::new(&[]);
 		let root = crate::decode_compact::<LayoutV1<H>, _, _>(
 			&mut db,
diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs
index d29f5a98f31b9..949f9a6e284eb 100644
--- a/primitives/trie/src/trie_codec.rs
+++ b/primitives/trie/src/trie_codec.rs
@@ -78,7 +78,7 @@ where
 	let mut child_tries = Vec::new();
 	{
 		// fetch child trie roots
-		let trie = crate::TrieDB::<L>::new(db, &top_root)?;
+		let trie = crate::TrieDBBuilder::<L>::new(db, &top_root).build();
 
 		let mut iter = trie.iter()?;
 
@@ -159,7 +159,7 @@ where
 	let mut child_tries = Vec::new();
 	let partial_db = proof.into_memory_db();
 	let mut compact_proof = {
-		let trie = crate::TrieDB::<L>::new(&partial_db, &root)?;
+		let trie = crate::TrieDBBuilder::<L>::new(&partial_db, &root).build();
 
 		let mut iter = trie.iter()?;
 
@@ -197,7 +197,7 @@ where
 			continue
 		}
 
-		let trie = crate::TrieDB::<L>::new(&partial_db, &child_root)?;
+		let trie = crate::TrieDBBuilder::<L>::new(&partial_db, &child_root).build();
 
 		let child_proof = trie_db::encode_compact::<L>(&trie)?;
 
 		compact_proof.extend(child_proof);
diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs
index ee81940b53993..9a25adfa5fca2 100644
--- a/primitives/version/proc-macro/src/decl_runtime_version.rs
+++ b/primitives/version/proc-macro/src/decl_runtime_version.rs
@@ -37,7 +37,7 @@ pub fn decl_runtime_version_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
 }
 
 fn decl_runtime_version_impl_inner(item: ItemConst) -> Result<TokenStream> {
-	let runtime_version = ParseRuntimeVersion::parse_expr(&*item.expr)?.build(item.expr.span())?;
+	let runtime_version = ParseRuntimeVersion::parse_expr(&item.expr)?.build(item.expr.span())?;
 
 	let link_section =
 		generate_emit_link_section_decl(&runtime_version.encode(), "runtime_version");
diff --git a/rustfmt.toml b/rustfmt.toml
index 441913f619cdc..f6fbe80064fce 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -21,3 +21,4 @@ match_block_trailing_comma = true
 trailing_comma = "Vertical"
 trailing_semicolon = false
 use_field_init_shorthand = true
+edition = "2021"
diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml
index 25ecad5bc5264..eedd2ee0bb409 100644
--- a/scripts/ci/gitlab/pipeline/build.yml
+++ b/scripts/ci/gitlab/pipeline/build.yml
@@ -149,7 +149,7 @@ build-rustdoc:
       - ./crate-docs/
   script:
    - rusty-cachier snapshot create
-    - time cargo +nightly doc --workspace --all-features --verbose
+    - time cargo +nightly doc --workspace --all-features --verbose --no-deps
    - rm -f $CARGO_TARGET_DIR/doc/.lock
    - mv $CARGO_TARGET_DIR/doc ./crate-docs
    # FIXME: remove me after CI image gets nonroot
diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml
index c02b50e0cbc1d..8f7a619f8b196 100644
--- a/scripts/ci/gitlab/pipeline/publish.yml
+++ b/scripts/ci/gitlab/pipeline/publish.yml
@@ -147,3 +147,17 @@ publish-draft-release:
  script:
    - ./scripts/ci/gitlab/publish_draft_release.sh
  allow_failure: true
+
+# Ref: https://github.com/paritytech/opstooling/issues/111
+update-node-template:
+  stage: publish
+  extends: .kubernetes-env
+  rules:
+    - if: $CI_COMMIT_REF_NAME =~ /^polkadot-v[0-9]+\.[0-9]+.*$/ # i.e. polkadot-v1.0.99, polkadot-v2.1rc1
+  script:
+    - git clone --depth=1 --branch="$PIPELINE_SCRIPTS_TAG" https://github.com/paritytech/pipeline-scripts
+    - ./pipeline-scripts/update_substrate_template.sh
+      --repo-name "substrate-node-template"
+      --template-path "bin/node-template"
+      --github-api-token "$GITHUB_TOKEN"
+      --polkadot-branch "$CI_COMMIT_REF_NAME"
diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml
index cc167410f94a4..acd2cb1fa7855 100644
--- a/scripts/ci/gitlab/pipeline/test.yml
+++ b/scripts/ci/gitlab/pipeline/test.yml
@@ -51,38 +51,22 @@ cargo-clippy:
    - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets
    - rusty-cachier cache upload

-cargo-check-nixos:
-  stage: test
-  # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
-  needs:
-    - job: cargo-clippy
-      artifacts: false
-  extends:
-    - .docker-env
-    - .test-refs
-  before_script: []
-  # Don't use CI_IMAGE here because it breaks nightly checks of paritytech/ci-linux image
-  image: nixos/nix
-  variables:
-    SNAP: "DUMMY"
-    WS_API: "DUMMY"
-  script:
-    - nix-channel --update
-    - nix-shell shell.nix
-    - nix-shell --run "cargo check --workspace --all-targets --all-features"
-
 cargo-check-benches:
   stage: test
   variables:
-    RUSTY_CACHIER_TOOLCHAIN: nightly
+    # Override to use nightly toolchain
+    RUSTY_CACHIER_TOOLCHAIN: "nightly"
+    CI_JOB_NAME: "cargo-check-benches"
   extends:
    - .docker-env
    - .test-refs
    - .collect-artifacts
+    - .pipeline-stopper-artifacts
  before_script:
    # perform rusty-cachier operations before any further modifications to the git repo, so that the cache is not invalidated
    - !reference [.rust-info-script, script]
    - !reference [.rusty-cachier, before_script]
+    - !reference [.pipeline-stopper-vars, script]
    # merges in the master branch on PRs
    - if [ $CI_COMMIT_REF_NAME != "master" ]; then
      git fetch origin +master:master;
@@ -91,16 +75,25 @@ cargo-check-benches:
      git config user.email "ci@gitlab.parity.io";
      git merge $CI_COMMIT_REF_NAME --verbose --no-edit;
      fi
+  parallel: 2
  script:
    - rusty-cachier snapshot create
    - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA
-    - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all
-    - 'cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json
-      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json'
-    - 'cargo run --release -p node-bench -- ::trie::read::small --json
-      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json'
-    - sccache -s
-    - rusty-cachier cache upload
+    # this job is executed in parallel on two runners
+    - echo "___Running benchmarks___";
+    - case ${CI_NODE_INDEX} in
+      1)
+        SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all;
+        cargo run --release -p node-bench -- ::trie::read::small --json
+        | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json;
+        echo "___Uploading cache for rusty-cachier___";
+        rusty-cachier cache upload
+        ;;
+      2)
+        cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json
+        | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json
+        ;;
+      esac
  tags:
    - linux-docker-benches

@@ -116,9 +109,13 @@ node-bench-regression-guard:
      # this is a DAG
      - job: cargo-check-benches
        artifacts: true
-      # this does not like a DAG, just polls the artifact
+      # polls artifact from master to compare with current result
      - project: $CI_PROJECT_PATH
-        job: cargo-check-benches
+        job: "cargo-check-benches 1/2"
+        ref: master
+        artifacts: true
+      - project: $CI_PROJECT_PATH
+        job: "cargo-check-benches 2/2"
        ref: master
        artifacts: true
  variables:
@@ -136,6 +133,7 @@ cargo-check-subkey:
  extends:
    - .docker-env
    - .test-refs
+    - .pipeline-stopper-artifacts
  script:
    - rusty-cachier snapshot create
    - cd ./bin/utils/subkey
@@ -186,13 +184,13 @@ test-deterministic-wasm:
  script:
    - rusty-cachier snapshot create
    # build runtime
-    - cargo build --verbose --release -p node-runtime
+    - cargo build --verbose --release -p kitchensink-runtime
    # make checksum
-    - sha256sum $CARGO_TARGET_DIR/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256
+    - sha256sum $CARGO_TARGET_DIR/release/wbuild/kitchensink-runtime/target/wasm32-unknown-unknown/release/kitchensink_runtime.wasm > checksum.sha256
    # clean up
    - rm -rf $CARGO_TARGET_DIR/release/wbuild
    # build again
-    - cargo build --verbose --release -p node-runtime
+    - cargo build --verbose --release -p kitchensink-runtime
    # confirm checksum
    - sha256sum -c ./checksum.sha256
    # clean up again, don't put release binaries into the cache
@@ -204,6 +202,7 @@ test-linux-stable:
  extends:
    - .docker-env
    - .test-refs
+    - .pipeline-stopper-artifacts
  variables:
    # Enable debug assertions since we are running optimized builds for testing
    # but still want to have debug assertions.
@@ -213,14 +212,25 @@ test-linux-stable:
    WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
    # Ensure we run the UI tests.
    RUN_UI_TESTS: 1
+    # needed for rusty-cachier to keep cache in test-linux-stable folder and not in test-linux-stable-1/3
+    CI_JOB_NAME: "test-linux-stable"
+  parallel: 3
  script:
    - rusty-cachier snapshot create
-    # TODO: add to paritytech/ci-linux image
-    - time cargo install cargo-nextest
    # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests
+    # tests are partitioned by nextest and executed in parallel on $CI_NODE_TOTAL runners
    # node-cli is excluded until https://github.com/paritytech/substrate/issues/11321 fixed
-    - time cargo nextest run --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path ./bin/node/cli/Cargo.toml --exclude node-cli
-    - rusty-cachier cache upload
+    - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
+    - time cargo nextest run --workspace
+      --locked
+      --release
+      --verbose
+      --features runtime-benchmarks
+      --manifest-path ./bin/node/cli/Cargo.toml
+      --exclude node-cli
+      --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL}
+    # we need to update cache only from one job
+    - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi

 test-frame-support:
   stage: test
@@ -267,6 +277,24 @@ test-linux-stable-extra:
    - time cargo test --doc --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path ./bin/node/cli/Cargo.toml
    - rusty-cachier cache upload

+# This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors.
+quick-benchmarks:
+  stage: test
+  extends:
+    - .docker-env
+    - .test-refs
+  variables:
+    # Enable debug assertions since we are running optimized builds for testing
+    # but still want to have debug assertions.
+    RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+    RUST_BACKTRACE: "full"
+    WASM_BUILD_NO_COLOR: 1
+    WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+  script:
+    - rusty-cachier snapshot create
+    - time cargo run --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1
+    - rusty-cachier cache upload
+
 test-frame-examples-compile-to-wasm:
   # into one job
   stage: test
@@ -293,6 +321,7 @@ test-linux-stable-int:
  extends:
    - .docker-env
    - .test-refs
+    - .pipeline-stopper-artifacts
  variables:
    # Enable debug assertions since we are running optimized builds for testing
    # but still want to have debug assertions.
@@ -309,6 +338,8 @@ test-linux-stable-int:
      time cargo test -p node-cli --release --verbose --locked -- --ignored
    - rusty-cachier cache upload
 
+# more information about this job can be found here:
+# https://github.com/paritytech/substrate/pull/6916
 check-tracing:
   stage: test
   # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
@@ -320,6 +351,7 @@ check-tracing:
  extends:
    - .docker-env
    - .test-refs
+    - .pipeline-stopper-artifacts
  script:
    - rusty-cachier snapshot create
    # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases
    - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing
    - rusty-cachier cache upload
 
+# more information about this job can be found here:
+# https://github.com/paritytech/substrate/pull/3778
 test-full-crypto-feature:
   stage: test
   # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
@@ -356,10 +390,14 @@ test-wasmer-sandbox:
  extends:
    - .docker-env
    - .test-refs-wasmer-sandbox
+  variables:
+    CI_JOB_NAME: "test-wasmer-sandbox"
+  parallel: 3
  script:
    - rusty-cachier snapshot create
-    - time cargo test --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests
-    - rusty-cachier cache upload
+    - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
+    - time cargo nextest run --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL}
+    - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi
 
 cargo-check-macos:
   stage: test
diff --git a/shell.nix b/shell.nix
deleted file mode 100644
index c318995605a38..0000000000000
--- a/shell.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-let
-  mozillaOverlay =
-    import (builtins.fetchGit {
-      url = "https://github.com/mozilla/nixpkgs-mozilla.git";
-      rev = "15b7a05f20aab51c4ffbefddb1b448e862dccb7d";
-    });
-  nixpkgs = import <nixpkgs> { overlays = [ mozillaOverlay ]; };
-  rust-nightly = with nixpkgs; ((rustChannelOf { date = "2022-04-20"; channel = "nightly"; }).rust.override {
-    extensions = [ "rust-src" ];
-    targets = [ "wasm32-unknown-unknown" ];
-  });
-in
-with nixpkgs; pkgs.mkShell {
-  buildInputs = [
-    clang
-    openssl.dev
-    pkg-config
-    rust-nightly
-  ] ++ lib.optionals stdenv.isDarwin [
-    darwin.apple_sdk.frameworks.Security
-  ];
-
-  RUST_SRC_PATH = "${rust-nightly}/lib/rustlib/src/rust/src";
-  LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib";
-  PROTOC = "${protobuf}/bin/protoc";
-  ROCKSDB_LIB_DIR = "${rocksdb}/lib";
-}
diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml
index ce5ef2ffcc01a..f8a1f437d45e1 100644
--- a/test-utils/client/Cargo.toml
+++ b/test-utils/client/Cargo.toml
@@ -12,12 +12,12 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-async-trait = "0.1.50"
+async-trait = "0.1.57"
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.21"
 hex = "0.4"
 serde = "1.0.136"
-serde_json = "1.0.79"
+serde_json = "1.0.85"
 sc-client-api = { version = "4.0.0-dev", path = "../../client/api" }
 sc-client-db = { version = "0.10.0-dev", default-features = false, features = [
 	"test-helpers",
diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs
index 148f34246044d..3115be58425e6 100644
--- a/test-utils/client/src/lib.rs
+++ b/test-utils/client/src/lib.rs
@@ -95,14 +95,14 @@ impl
 	}
 
 	/// Create new `TestClientBuilder` with default backend and pruning window size
-	pub fn with_pruning_window(keep_blocks: u32) -> Self {
-		let backend = Arc::new(Backend::new_test(keep_blocks, 0));
+	pub fn with_pruning_window(blocks_pruning: u32) -> Self {
+		let backend = Arc::new(Backend::new_test(blocks_pruning, 0));
 		Self::with_backend(backend)
 	}
 
 	/// Create new `TestClientBuilder` with default backend and storage chain mode
-	pub fn with_tx_storage(keep_blocks: u32) -> Self {
-		let backend = Arc::new(Backend::new_test_with_tx_storage(keep_blocks, 0));
+	pub fn with_tx_storage(blocks_pruning: u32) -> Self {
+		let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0));
 		Self::with_backend(backend)
 	}
 }
diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml
index 1c2707b3719ad..6cea6282f5bd8 100644
--- a/test-utils/runtime/Cargo.toml
+++ b/test-utils/runtime/Cargo.toml
@@ -41,7 +41,7 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "..
 sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" }
 sp-trie = { version = "6.0.0", default-features = false, path = "../../primitives/trie" }
 sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" }
-trie-db = { version = "0.23.1", default-features = false }
+trie-db = { version = "0.24.0", default-features = false }
 parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] }
 sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" }
 sp-state-machine = { version = "0.12.0", default-features = false, path = "../../primitives/state-machine" }
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs
index a4e4dd04fc637..3bb74c62fe157 100644
--- a/test-utils/runtime/src/lib.rs
+++ b/test-utils/runtime/src/lib.rs
@@ -29,7 +29,10 @@ use sp_std::{marker::PhantomData, prelude::*};
 
 use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic};
 use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug};
-use sp_trie::{trie_types::TrieDB, PrefixedMemoryDB, StorageProof};
+use sp_trie::{
+	trie_types::{TrieDBBuilder, TrieDBMutBuilderV1},
+	PrefixedMemoryDB, StorageProof,
+};
 use trie_db::{Trie, TrieMut};
 
 use cfg_if::cfg_if;
@@ -37,7 +40,7 @@ use frame_support::{
 	dispatch::RawOrigin,
 	parameter_types,
 	traits::{CallerTrait, ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem},
-	weights::RuntimeDbWeight,
+	weights::{RuntimeDbWeight, Weight},
 };
 use frame_system::limits::{BlockLength, BlockWeights};
 use sp_api::{decl_runtime_apis, impl_runtime_apis};
@@ -60,8 +63,6 @@ use sp_runtime::{
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
-// bench on latest state.
-use sp_trie::trie_types::TrieDBMutV1 as TrieDBMut;
 
 // Ensure Babe and Aura use the same crypto to simplify things a bit.
 pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot};
@@ -238,7 +239,7 @@ impl sp_runtime::traits::Dispatchable for Extrinsic {
 	type Info = ();
 	type PostInfo = ();
 	fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
-		panic!("This implemention should not be used for actual dispatch.");
+		panic!("This implementation should not be used for actual dispatch.");
 	}
 }
 
@@ -595,7 +596,7 @@ parameter_types! {
 	pub RuntimeBlockLength: BlockLength = BlockLength::max(4 * 1024 * 1024);
 	pub RuntimeBlockWeights: BlockWeights =
-		BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75));
+		BlockWeights::with_sensible_defaults(Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75));
 }
 
 impl From<frame_system::Event<Runtime>> for Extrinsic {
@@ -688,25 +689,19 @@ fn code_using_trie() -> u64 {
 	let mut mdb = PrefixedMemoryDB::default();
 	let mut root = sp_std::default::Default::default();
 
-	let _ = {
-		let mut t = TrieDBMut::<Hashing>::new(&mut mdb, &mut root);
+	{
+		let mut t = TrieDBMutBuilderV1::<Hashing>::new(&mut mdb, &mut root).build();
 		for (key, value) in &pairs {
 			if t.insert(key, value).is_err() {
 				return 101
 			}
 		}
-		t
-	};
-
-	if let Ok(trie) = TrieDB::<Hashing>::new(&mdb, &root) {
-		if let Ok(iter) = trie.iter() {
-			iter.flatten().count() as u64
-		} else {
-			102
-		}
-	} else {
-		103
 	}
+
+	let trie = TrieDBBuilder::<Hashing>::new(&mdb, &root).build();
+	let res = if let Ok(iter) = trie.iter() { iter.flatten().count() as u64 } else { 102 };
+
+	res
 }
 
 impl_opaque_keys! {
@@ -1302,7 +1297,7 @@ fn test_read_child_storage() {
 fn test_witness(proof: StorageProof, root: crate::Hash) {
 	use sp_externalities::Externalities;
 	let db: sp_trie::MemoryDB<crate::Hashing> = proof.into_memory_db();
-	let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root);
+	let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build();
 	let mut overlay = sp_state_machine::OverlayedChanges::default();
 	let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default();
 	let mut ext = sp_state_machine::Ext::new(
@@ -1379,7 +1374,8 @@ mod tests {
 		let mut root = crate::Hash::default();
 		let mut mdb = sp_trie::MemoryDB::<crate::Hashing>::default();
 		{
-			let mut trie = sp_trie::trie_types::TrieDBMutV1::new(&mut mdb, &mut root);
+			let mut trie =
+				sp_trie::trie_types::TrieDBMutBuilderV1::new(&mut mdb, &mut root).build();
 			trie.insert(b"value3", &[142]).expect("insert failed");
 			trie.insert(b"value4", &[124]).expect("insert failed");
 		};
@@ -1389,7 +1385,8 @@ mod tests {
 	#[test]
 	fn witness_backend_works() {
 		let (db, root) = witness_backend();
-		let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root);
+		let backend =
+			sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build();
 		let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap();
 		let client =
 			TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build();
diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml
index 98378309ad9c1..fa6dde5b5b57e 100644
--- a/test-utils/runtime/transaction-pool/Cargo.toml
+++ b/test-utils/runtime/transaction-pool/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.21"
-parking_lot = "0.12.0"
+parking_lot = "0.12.1"
 thiserror = "1.0"
 sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" }
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml
index ea26bf0c9261c..4f9d703544096 100644
--- a/utils/frame/benchmarking-cli/Cargo.toml
+++ b/utils/frame/benchmarking-cli/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 chrono = "0.4"
 clap = { version = "3.1.18", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-comfy-table = { version = "5.0.1", default-features = false }
+comfy-table = { version = "6.0.0", default-features = false }
 handlebars = "4.2.2"
 hash-db = "0.15.2"
 hex = "0.4.3"
@@ -30,7 +30,7 @@ memory-db = "0.29.0"
 rand = { version = "0.8.4", features = ["small_rng"] }
 rand_pcg = "0.3.1"
 serde = "1.0.136"
-serde_json = "1.0.79"
+serde_json = "1.0.85"
 serde_nanos = "0.1.2"
 tempfile = "3.2.0"
 thiserror = "1.0.30"
diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs
index e48a7e8b3c6f5..36215c8a0586d 100644
--- a/utils/frame/benchmarking-cli/src/block/bench.rs
+++ b/utils/frame/benchmarking-cli/src/block/bench.rs
@@ -142,7 +142,8 @@ where
 		let weight = ConsumedWeight::decode_all(&mut raw_weight)?;
 
 		// Should be divisible, but still use floats in case we ever change that.
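+		// NOTE: `Weight` is two-dimensional after this change, so the conversion below
+		// compares the `ref_time` components of the consumed weight and of
+		// `WEIGHT_PER_NANOS` to turn the total weight into nanoseconds.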
-		Ok((weight.total() as f64 / WEIGHT_PER_NANOS as f64).floor() as NanoSeconds)
+		Ok((weight.total().ref_time() as f64 / WEIGHT_PER_NANOS.ref_time() as f64).floor()
+			as NanoSeconds)
 	}
 
 	/// Prints the weight info of a block to the console.
diff --git a/utils/frame/benchmarking-cli/src/overhead/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs
similarity index 70%
rename from utils/frame/benchmarking-cli/src/overhead/bench.rs
rename to utils/frame/benchmarking-cli/src/extrinsic/bench.rs
index be7dac24021cf..27fc40fb34774 100644
--- a/utils/frame/benchmarking-cli/src/overhead/bench.rs
+++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs
@@ -36,8 +36,8 @@ use log::info;
 use serde::Serialize;
 use std::{marker::PhantomData, sync::Arc, time::Instant};
 
-use super::cmd::ExtrinsicBuilder;
-use crate::shared::Stats;
+use super::ExtrinsicBuilder;
+use crate::shared::{StatSelect, Stats};
 
 /// Parameters to configure an *overhead* benchmark.
 #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
@@ -60,21 +60,11 @@ pub struct BenchmarkParams {
 /// The results of multiple runs in nano seconds.
 pub(crate) type BenchRecord = Vec<u64>;
 
-/// Type of a benchmark.
-#[derive(Serialize, Clone, PartialEq, Copy)]
-pub(crate) enum BenchmarkType {
-	/// Measure the per-extrinsic execution overhead.
-	Extrinsic,
-	/// Measure the per-block execution overhead.
-	Block,
-}
-
 /// Holds all objects needed to run the *overhead* benchmarks.
 pub(crate) struct Benchmark<Block, BA, C> {
 	client: Arc<C>,
 	params: BenchmarkParams,
 	inherent_data: sp_inherents::InherentData,
-	ext_builder: Arc<dyn ExtrinsicBuilder>,
 	_p: PhantomData<(Block, BA)>,
 }
 
@@ -90,23 +80,51 @@ where
 		client: Arc<C>,
 		params: BenchmarkParams,
 		inherent_data: sp_inherents::InherentData,
-		ext_builder: Arc<dyn ExtrinsicBuilder>,
 	) -> Self {
-		Self { client, params, inherent_data, ext_builder, _p: PhantomData }
+		Self { client, params, inherent_data, _p: PhantomData }
 	}
 
-	/// Run the specified benchmark.
-	pub fn bench(&self, bench_type: BenchmarkType) -> Result<Stats> {
-		let (block, num_ext) = self.build_block(bench_type)?;
-		let record = self.measure_block(&block, num_ext, bench_type)?;
+	/// Benchmark a block with only inherents.
+	pub fn bench_block(&self) -> Result<Stats> {
+		let (block, _) = self.build_block(None)?;
+		let record = self.measure_block(&block)?;
 		Stats::new(&record)
 	}
 
-	/// Builds a block for the given benchmark type.
+	/// Benchmark the time of an extrinsic in a full block.
+	///
+	/// First benchmarks an empty block, analogous to `bench_block`, and uses it as the baseline.
+	/// Then benchmarks a full block built with the given `ext_builder` and subtracts the baseline
+	/// from the result.
+	/// This is necessary to account for the time the inherents use.
+	pub fn bench_extrinsic(&self, ext_builder: &dyn ExtrinsicBuilder) -> Result<Stats> {
+		let (block, _) = self.build_block(None)?;
+		let base = self.measure_block(&block)?;
+		let base_time = Stats::new(&base)?.select(StatSelect::Average);
+
+		let (block, num_ext) = self.build_block(Some(ext_builder))?;
+		let num_ext = num_ext.ok_or_else(|| Error::Input("Block was empty".into()))?;
+		let mut records = self.measure_block(&block)?;
+
+		for r in &mut records {
+			// Subtract the base time.
+			*r = r.saturating_sub(base_time);
+			// Divide by the number of extrinsics in the block.
+			*r = ((*r as f64) / (num_ext as f64)).ceil() as u64;
+		}
+
+		Stats::new(&records)
+	}
+
+	/// Builds a block with some optional extrinsics.
 	///
 	/// Returns the block and the number of extrinsics in the block
 	/// that are not inherents.
-	fn build_block(&self, bench_type: BenchmarkType) -> Result<(Block, u64)> {
+	/// Returns a block with only inherents if `ext_builder` is `None`.
+	fn build_block(
+		&self,
+		ext_builder: Option<&dyn ExtrinsicBuilder>,
+	) -> Result<(Block, Option<u64>)> {
 		let mut builder = self.client.new_block(Default::default())?;
 		// Create and insert the inherents.
 		let inherents = builder.create_inherents(self.inherent_data.clone())?;
@@ -114,16 +132,18 @@ where
 			builder.push(inherent)?;
 		}
 
-		// Return early if we just want a block with inherents and no additional extrinsics.
-		if bench_type == BenchmarkType::Block {
-			return Ok((builder.build()?.block, 0))
-		}
+		// Return early if `ext_builder` is `None`.
+		let ext_builder = if let Some(ext_builder) = ext_builder {
+			ext_builder
+		} else {
+			return Ok((builder.build()?.block, None))
+		};
 
 		// Put as many extrinsics into the block as possible and count them.
 		info!("Building block, this takes some time...");
 		let mut num_ext = 0;
 		for nonce in 0..self.max_ext_per_block() {
-			let ext = self.ext_builder.remark(nonce)?;
+			let ext = ext_builder.build(nonce)?;
 			match builder.push(ext.clone()) {
 				Ok(()) => {},
 				Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid(
@@ -139,20 +159,12 @@ where
 		info!("Extrinsics per block: {}", num_ext);
 		let block = builder.build()?.block;
 
-		Ok((block, num_ext))
+		Ok((block, Some(num_ext)))
 	}
 
 	/// Measures the time that it takes to execute a block or an extrinsic.
-	fn measure_block(
-		&self,
-		block: &Block,
-		num_ext: u64,
-		bench_type: BenchmarkType,
-	) -> Result<BenchRecord> {
+	fn measure_block(&self, block: &Block) -> Result<BenchRecord> {
 		let mut record = BenchRecord::new();
-		if bench_type == BenchmarkType::Extrinsic && num_ext == 0 {
-			return Err("Cannot measure the extrinsic time of an empty block".into())
-		}
 
 		let genesis = BlockId::Number(Zero::zero());
 		info!("Running {} warmups...", self.params.warmup);
@@ -176,12 +188,7 @@ where
 				.map_err(|e| Error::Client(RuntimeApiError(e)))?;
 			let elapsed = start.elapsed().as_nanos();
 
-			if bench_type == BenchmarkType::Extrinsic {
-				// Checked for non-zero div above.
-				record.push((elapsed as f64 / num_ext as f64).ceil() as u64);
-			} else {
-				record.push(elapsed as u64);
-			}
+			record.push(elapsed as u64);
 		}
 
 		Ok(record)
@@ -191,21 +198,3 @@ where
 		self.params.max_ext_per_block.unwrap_or(u32::MAX)
 	}
 }
-
-impl BenchmarkType {
-	/// Short name of the benchmark type.
-	pub(crate) fn short_name(&self) -> &'static str {
-		match self {
-			Self::Extrinsic => "extrinsic",
-			Self::Block => "block",
-		}
-	}
-
-	/// Long name of the benchmark type.
-	pub(crate) fn long_name(&self) -> &'static str {
-		match self {
-			Self::Extrinsic => "ExtrinsicBase",
-			Self::Block => "BlockExecution",
-		}
-	}
-}
diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs
new file mode 100644
index 0000000000000..6b4fd0fad6638
--- /dev/null
+++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs
@@ -0,0 +1,134 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider};
+use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams};
+use sc_client_api::Backend as ClientBackend;
+use sp_api::{ApiExt, ProvideRuntimeApi};
+use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic};
+
+use clap::{Args, Parser};
+use log::info;
+use serde::Serialize;
+use std::{fmt::Debug, sync::Arc};
+
+use super::{
+	bench::{Benchmark, BenchmarkParams},
+	extrinsic_factory::ExtrinsicFactory,
+};
+
+/// Benchmark the execution time of different extrinsics.
+///
+/// This is calculated by filling a block with a specific extrinsic and executing the block.
+/// The resulting time is then divided by the number of extrinsics in that block.
+///
+/// NOTE: The BlockExecutionWeight is ignored in this case since it
+/// is very small compared to the total block execution time.
+#[derive(Debug, Parser)]
+pub struct ExtrinsicCmd {
+	#[allow(missing_docs)]
+	#[clap(flatten)]
+	pub shared_params: SharedParams,
+
+	#[allow(missing_docs)]
+	#[clap(flatten)]
+	pub import_params: ImportParams,
+
+	#[allow(missing_docs)]
+	#[clap(flatten)]
+	pub params: ExtrinsicParams,
+}
+
+/// The params for the [`ExtrinsicCmd`].
+#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
+pub struct ExtrinsicParams {
+	#[clap(flatten)]
+	pub bench: BenchmarkParams,
+
+	/// List all available pallets and extrinsics.
+	///
+	/// The format is CSV with header `pallet, extrinsic`.
+	#[clap(long)]
+	pub list: bool,
+
+	/// Pallet name of the extrinsic to benchmark.
+	#[clap(long, value_name = "PALLET", required_unless_present = "list")]
+	pub pallet: Option<String>,
+
+	/// Extrinsic to benchmark.
+	#[clap(long, value_name = "EXTRINSIC", required_unless_present = "list")]
+	pub extrinsic: Option<String>,
+}
+
+impl ExtrinsicCmd {
+	/// Benchmark the execution time of a specific type of extrinsic.
+	///
+	/// The output will be printed to console.
+	pub fn run<Block, BA, C>(
+		&self,
+		client: Arc<C>,
+		inherent_data: sp_inherents::InherentData,
+		ext_factory: &ExtrinsicFactory,
+	) -> Result<()>
+	where
+		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
+		BA: ClientBackend<Block>,
+		C: BlockBuilderProvider<BA, Block, C> + ProvideRuntimeApi<Block>,
+		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
+	{
+		// Short circuit if --list was specified.
+		if self.params.list {
+			let list: Vec<String> = ext_factory.0.iter().map(|b| b.name()).collect();
+			info!(
+				"Listing available extrinsics ({}):\npallet, extrinsic\n{}",
+				list.len(),
+				list.join("\n")
+			);
+			return Ok(())
+		}
+
+		let pallet = self.params.pallet.clone().unwrap_or_default();
+		let extrinsic = self.params.extrinsic.clone().unwrap_or_default();
+		let ext_builder = match ext_factory.try_get(&pallet, &extrinsic) {
+			Some(ext_builder) => ext_builder,
+			None =>
+				return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()),
+		};
+
+		let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data);
+		let stats = bench.bench_extrinsic(ext_builder)?;
+		info!(
+			"Executing a {}::{} extrinsic takes [ns]:\n{:?}",
+			ext_builder.pallet(),
+			ext_builder.extrinsic(),
+			stats
+		);
+
+		Ok(())
+	}
+}
+
+// Boilerplate
+impl CliConfiguration for ExtrinsicCmd {
+	fn shared_params(&self) -> &SharedParams {
+		&self.shared_params
+	}
+
+	fn import_params(&self) -> Option<&ImportParams> {
+		Some(&self.import_params)
+	}
+}
diff --git a/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs b/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs
new file mode 100644
index 0000000000000..7e1a22ccd1419
--- /dev/null
+++ b/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs
@@ -0,0 +1,70 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides the [`ExtrinsicFactory`] and the [`ExtrinsicBuilder`] types.
+//! Used by the *overhead* and *extrinsic* benchmarks to build extrinsics.
+
+use sp_runtime::OpaqueExtrinsic;
+
+/// Helper to manage [`ExtrinsicBuilder`] instances.
+#[derive(Default)]
+pub struct ExtrinsicFactory(pub Vec<Box<dyn ExtrinsicBuilder>>);
+
+impl ExtrinsicFactory {
+	/// Returns a builder for a pallet and extrinsic name.
+	///
+	/// Is case-insensitive.
+	pub fn try_get(&self, pallet: &str, extrinsic: &str) -> Option<&dyn ExtrinsicBuilder> {
+		let pallet = pallet.to_lowercase();
+		let extrinsic = extrinsic.to_lowercase();
+
+		self.0
+			.iter()
+			.find(|b| b.pallet() == pallet && b.extrinsic() == extrinsic)
+			.map(|b| b.as_ref())
+	}
+}
+
+/// Used by the benchmark to build signed extrinsics.
+///
+/// The built extrinsics only need to be valid in the first block
+/// whose parent block is the genesis block.
+/// This assumption simplifies the generation of the extrinsics.
+/// The signer should be one of the pre-funded dev accounts.
+pub trait ExtrinsicBuilder {
+	/// Name of the pallet this builder is for.
+	///
+	/// Should be all lowercase.
+	fn pallet(&self) -> &str;
+
+	/// Name of the extrinsic this builder is for.
+	///
+	/// Should be all lowercase.
+	fn extrinsic(&self) -> &str;
+
+	/// Builds an extrinsic.
+	///
+	/// Will be called multiple times with increasing nonces.
+	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str>;
+}
+
+impl dyn ExtrinsicBuilder + '_ {
+	/// Name of this builder in CSV format: `pallet, extrinsic`.
+	pub fn name(&self) -> String {
+		format!("{}, {}", self.pallet(), self.extrinsic())
+	}
+}
diff --git a/utils/frame/benchmarking-cli/src/extrinsic/mod.rs b/utils/frame/benchmarking-cli/src/extrinsic/mod.rs
new file mode 100644
index 0000000000000..12a09c3b1af63
--- /dev/null
+++ b/utils/frame/benchmarking-cli/src/extrinsic/mod.rs
@@ -0,0 +1,27 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Benchmark the time it takes to execute a specific extrinsic.
+//! This is a generalization of the *overhead* benchmark which can only measure `System::Remark`
+//! extrinsics.
+
+pub mod bench;
+pub mod cmd;
+pub mod extrinsic_factory;
+
+pub use cmd::ExtrinsicCmd;
+pub use extrinsic_factory::{ExtrinsicBuilder, ExtrinsicFactory};
diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs
index d0eee3d2939fc..6e4f092084ef5 100644
--- a/utils/frame/benchmarking-cli/src/lib.rs
+++ b/utils/frame/benchmarking-cli/src/lib.rs
@@ -18,6 +18,7 @@
 //! Contains the root [`BenchmarkCmd`] command and exports its sub-commands.
 
 mod block;
+mod extrinsic;
 mod machine;
 mod overhead;
 mod pallet;
@@ -25,8 +26,9 @@ mod shared;
 mod storage;
 
 pub use block::BlockCmd;
+pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory};
 pub use machine::{MachineCmd, Requirements, SUBSTRATE_REFERENCE_HARDWARE};
-pub use overhead::{ExtrinsicBuilder, OverheadCmd};
+pub use overhead::OverheadCmd;
 pub use pallet::PalletCmd;
 pub use storage::StorageCmd;
 
@@ -41,8 +43,8 @@ pub enum BenchmarkCmd {
 	Storage(StorageCmd),
 	Overhead(OverheadCmd),
 	Block(BlockCmd),
-	#[clap(hide = true)] // Hidden until fully completed.
 	Machine(MachineCmd),
+	Extrinsic(ExtrinsicCmd),
 }
 
 /// Unwraps a [`BenchmarkCmd`] into its concrete sub-command.
@@ -58,6 +60,7 @@ macro_rules! unwrap_cmd {
 			BenchmarkCmd::Overhead($cmd) => $code,
 			BenchmarkCmd::Block($cmd) => $code,
 			BenchmarkCmd::Machine($cmd) => $code,
+			BenchmarkCmd::Extrinsic($cmd) => $code,
 		}
 	}
 }
@@ -90,9 +93,9 @@ impl CliConfiguration for BenchmarkCmd {
 		}
 	}
 
-	fn state_cache_size(&self) -> Result<usize> {
+	fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
 		unwrap_cmd! {
-			self, cmd, cmd.state_cache_size()
+			self, cmd, cmd.trie_cache_maximum_size()
 		}
 	}
 
diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md
index 6f41e881d0572..a1da5da0d0792 100644
--- a/utils/frame/benchmarking-cli/src/overhead/README.md
+++ b/utils/frame/benchmarking-cli/src/overhead/README.md
@@ -1,21 +1,21 @@
 # The `benchmark overhead` command
 
-Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead".
-This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead.
-The exact overhead to can vary per Substrate chain and needs to be calculated per chain.
+Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead".
+This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead.
+The exact overhead can vary per Substrate chain and needs to be calculated per chain.
 
 This command calculates the exact values of these overhead weights for any Substrate chain that supports it.
 
 ## How does it work?
-The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`].
+The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`].
 Both are executed sequentially when invoking the command.
 
 ## BlockExecutionWeight
 
-The block execution weight is defined as the weight that it takes to execute an *empty block*.
-It is measured by constructing an empty block and measuring its executing time.
-The result are written to a `block_weights.rs` file which is created from a template.
-The file will contain the concrete weight value and various statistics about the measurements. For example:
+The block execution weight is defined as the weight that it takes to execute an *empty block*.
+It is measured by constructing an empty block and measuring its execution time.
+The results are written to a `block_weights.rs` file which is created from a template.
+The file will contain the concrete weight value and various statistics about the measurements. For example:
 
 ```rust
 /// Time to execute an empty block.
 /// Calculated by multiplying the *Average* with `1` and adding `0`.
@@ -30,21 +30,21 @@ The file will contain the concrete weight value and various statistics about the
 /// 99th: 3_631_863
 /// 95th: 3_595_674
 /// 75th: 3_526_435
-pub const BlockExecutionWeight: Weight = 3_532_484 * WEIGHT_PER_NANOS;
+pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(3_532_484);
 ```
 
-In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block.
+In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block.
 This constant weight is therefore added to each block to ensure that Substrate budgets enough time to execute it.
 
 ## ExtrinsicBaseWeight
 
-The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic.
-An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent to the empty block form above.
+The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic.
+An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent of the empty block from above.
 
 The benchmark now constructs a block which is filled with only NO-OP extrinsics.
-This block is then executed many times and the weights are measured.
-The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`.
+This block is then executed many times and the weights are measured.
+The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`.
 
-The relevant section in the output file looks like this:
+The relevant section in the output file looks like this:
 ```rust
 /// Time to execute a NO-OP extrinsic, for example `System::remark`.
 /// Calculated by multiplying the *Average* with `1` and adding `0`.
@@ -59,10 +59,10 @@ The relevant section in the output file looks like this:
 /// 99th: 68_758
 /// 95th: 67_843
 /// 75th: 67_749
-pub const ExtrinsicBaseWeight: Weight = 67_745 * WEIGHT_PER_NANOS;
+pub const ExtrinsicBaseWeight: Weight = Weight::from_ref_time(67_745 * WEIGHT_PER_NANOS);
 ```
 
-In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic.
+In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic.
 This constant weight is therefore added to each extrinsic to ensure that Substrate budgets enough time to execute it.
 
 ## Invocation
@@ -76,48 +76,48 @@ Output:
 ```pre
 # BlockExecutionWeight
 Running 10 warmups...
-Executing block 100 times
+Executing block 100 times
 Per-block execution overhead [ns]:
 Total: 353248430
 Min: 3508416, Max: 3680498
 Average: 3532484, Median: 3522111, Stddev: 27070.23
-Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435
+Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435
 Writing weights to "block_weights.rs"
 
 # Setup
-Building block, this takes some time...
+Building block, this takes some time...
 Extrinsics per block: 12000
 
 # ExtrinsicBaseWeight
 Running 10 warmups...
-Executing block 100 times
+Executing block 100 times
 Per-extrinsic execution overhead [ns]:
 Total: 6774590
 Min: 67561, Max: 69855
 Average: 67745, Median: 67701, Stddev: 264.68
-Percentiles 99th, 95th, 75th: 68758, 67843, 67749
+Percentiles 99th, 95th, 75th: 68758, 67843, 67749
 Writing weights to "extrinsic_weights.rs"
 ```
 
-The complete command for Polkadot looks like this:
+The complete command for Polkadot looks like this:
 ```sh
 cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --execution=wasm --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/
 ```
 
-This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory.
-You can try the same for *Rococo* and to see that the results slightly differ.
+This will overwrite the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory.
+You can try the same for *Rococo* to see that the results slightly differ.
 👉 It is paramount to use `--profile=production`, `--execution=wasm` and `--wasm-execution=compiled` as the results are otherwise useless.
 
 ## Output Interpretation
 
-Lower is better. The less weight the execution overhead needs, the better.
-Since the weights of the overhead is charged per extrinsic and per block, a larger weight results in less extrinsics per block.
+Lower is better. The less weight the execution overhead needs, the better.
+Since the overhead weights are charged per extrinsic and per block, a larger weight results in fewer extrinsics per block.
 Minimizing this is important to have a large transaction throughput.
 
 ## Arguments
 
-- `--chain` / `--dev` Set the chain specification.
-- `--weight-path` Set the output directory or file to write the weights to.
+- `--chain` / `--dev` Set the chain specification.
+- `--weight-path` Set the output directory or file to write the weights to.
 - `--repeat` Set the repetitions of both benchmarks.
 - `--warmup` Set the rounds of warmup before measuring.
 - `--execution` Should be set to `wasm` for correct results.
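For context, the generated constants are typically wired into the runtime's `frame_system` block limits. The sketch below follows the common Substrate runtime pattern; the builder calls and surrounding boilerplate are assumptions based on that pattern, not output of this command:

```rust
// Minimal sketch: consuming the constants written by `benchmark overhead`.
// `BlockExecutionWeight` and `ExtrinsicBaseWeight` are the generated constants;
// everything else is assumed runtime boilerplate.
use frame_support::{parameter_types, weights::DispatchClass};
use frame_system::limits::BlockWeights;

parameter_types! {
	pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder()
		// Charge the measured empty-block overhead on every block.
		.base_block(BlockExecutionWeight::get())
		.for_class(DispatchClass::all(), |weights| {
			// Charge the measured NO-OP overhead on every extrinsic.
			weights.base_extrinsic = ExtrinsicBaseWeight::get();
		})
		.build_or_panic();
}
```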
diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs
index 357c89d97a5ac..28ceea63f1572 100644
--- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs
+++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs
@@ -31,10 +31,11 @@ use serde::Serialize;
 use std::{fmt::Debug, sync::Arc};
 
 use crate::{
-	overhead::{
-		bench::{Benchmark, BenchmarkParams, BenchmarkType},
-		template::TemplateData,
+	extrinsic::{
+		bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams},
+		ExtrinsicBuilder,
 	},
+	overhead::template::TemplateData,
 	shared::{HostInfoParams, WeightParams},
 };
 
@@ -63,20 +64,20 @@ pub struct OverheadParams {
 
 	#[allow(missing_docs)]
 	#[clap(flatten)]
-	pub bench: BenchmarkParams,
+	pub bench: ExtrinsicBenchmarkParams,
 
 	#[allow(missing_docs)]
 	#[clap(flatten)]
 	pub hostinfo: HostInfoParams,
 }
 
-/// Used by the benchmark to build signed extrinsics.
-///
-/// The built extrinsics only need to be valid in the first block
-/// who's parent block is the genesis block.
-pub trait ExtrinsicBuilder {
-	/// Build a `System::remark` extrinsic.
-	fn remark(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str>;
+/// Type of a benchmark.
+#[derive(Serialize, Clone, PartialEq, Copy)]
+pub(crate) enum BenchmarkType {
+	/// Measure the per-extrinsic execution overhead.
+	Extrinsic,
+	/// Measure the per-block execution overhead.
+	Block,
 }
 
 impl OverheadCmd {
@@ -89,7 +90,7 @@ impl OverheadCmd {
 		cfg: Configuration,
 		client: Arc<C>,
 		inherent_data: sp_inherents::InherentData,
-		ext_builder: Arc<dyn ExtrinsicBuilder>,
+		ext_builder: &dyn ExtrinsicBuilder,
 	) -> Result<()>
 	where
 		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
 		C: BlockBuilderProvider<BA, Block, C> + ProvideRuntimeApi<Block>,
 		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	{
-		let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, ext_builder);
+		if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" {
+			return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into());
+		}
+		let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data);
 
 		// per-block execution overhead
 		{
-			let stats = bench.bench(BenchmarkType::Block)?;
+			let stats = bench.bench_block()?;
 			info!("Per-block execution overhead [ns]:\n{:?}", stats);
 			let template = TemplateData::new(BenchmarkType::Block, &cfg, &self.params, &stats)?;
 			template.write(&self.params.weight.weight_path)?;
 		}
 		// per-extrinsic execution overhead
 		{
-			let stats = bench.bench(BenchmarkType::Extrinsic)?;
+			let stats = bench.bench_extrinsic(ext_builder)?;
 			info!("Per-extrinsic execution overhead [ns]:\n{:?}", stats);
 			let template = TemplateData::new(BenchmarkType::Extrinsic, &cfg, &self.params, &stats)?;
 			template.write(&self.params.weight.weight_path)?;
@@ -118,6 +122,24 @@ impl OverheadCmd {
 	}
 }
 
+impl BenchmarkType {
+	/// Short name of the benchmark type.
+	pub(crate) fn short_name(&self) -> &'static str {
+		match self {
+			Self::Extrinsic => "extrinsic",
+			Self::Block => "block",
+		}
+	}
+
+	/// Long name of the benchmark type.
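+	/// Used to name the generated weight constant, e.g. `ExtrinsicBaseWeight`.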
+	pub(crate) fn long_name(&self) -> &'static str {
+		match self {
+			Self::Extrinsic => "ExtrinsicBase",
+			Self::Block => "BlockExecution",
+		}
+	}
+}
+
 // Boilerplate
 impl CliConfiguration for OverheadCmd {
 	fn shared_params(&self) -> &SharedParams {
diff --git a/utils/frame/benchmarking-cli/src/overhead/mod.rs b/utils/frame/benchmarking-cli/src/overhead/mod.rs
index abdeac22b7898..fc3db912f7a30 100644
--- a/utils/frame/benchmarking-cli/src/overhead/mod.rs
+++ b/utils/frame/benchmarking-cli/src/overhead/mod.rs
@@ -15,8 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-mod bench;
 pub mod cmd;
-mod template;
+pub mod template;
 
-pub use cmd::{ExtrinsicBuilder, OverheadCmd};
+pub use cmd::OverheadCmd;
diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs
index 33c2c7999039a..aa82e45cf6db9 100644
--- a/utils/frame/benchmarking-cli/src/overhead/template.rs
+++ b/utils/frame/benchmarking-cli/src/overhead/template.rs
@@ -27,7 +27,7 @@ use serde::Serialize;
 use std::{env, fs, path::PathBuf};
 
 use crate::{
-	overhead::{bench::BenchmarkType, cmd::OverheadParams},
+	overhead::cmd::{BenchmarkType, OverheadParams},
 	shared::{Stats, UnderscoreHelper},
 };
 
diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs
index f8312b0052592..d07533e9dbaa8 100644
--- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs
+++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs
@@ -52,11 +52,12 @@ parameter_types! {
 	/// 99th: {{underscore stats.p99}}
 	/// 95th: {{underscore stats.p95}}
 	/// 75th: {{underscore stats.p75}}
-	pub const {{long_name}}Weight: Weight = {{underscore weight}} * WEIGHT_PER_NANOS;
+	pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul({{underscore weight}});
 }
 
 #[cfg(test)]
 mod test_weights {
+	use super::*;
 	use frame_support::weights::constants;
 
 	/// Checks that the weight exists and is sane.
@@ -68,14 +69,14 @@ mod test_weights {
 
 		{{#if (eq short_name "block")}}
 		// At least 100 µs.
-		assert!(w >= 100 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs.");
+		assert!(w >= Weight::from_ref_time(100 * constants::WEIGHT_PER_MICROS), "Weight should be at least 100 µs.");
 		// At most 50 ms.
-		assert!(w <= 50 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms.");
+		assert!(w <= Weight::from_ref_time(50 * constants::WEIGHT_PER_MILLIS), "Weight should be at most 50 ms.");
 		{{else}}
 		// At least 10 µs.
-		assert!(w >= 10 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs.");
+		assert!(w >= Weight::from_ref_time(10 * constants::WEIGHT_PER_MICROS), "Weight should be at least 10 µs.");
 		// At most 1 ms.
-		assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms.");
+		assert!(w <= Weight::from_ref_time(constants::WEIGHT_PER_MILLIS), "Weight should be at most 1 ms.");
 		{{/if}}
 	}
 }
diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs
index fae5a4494cdc4..0fc7cc4d783f7 100644
--- a/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -228,6 +228,11 @@ impl PalletCmd {
 		let mut component_ranges = HashMap::<(Vec<u8>, Vec<u8>), Vec<ComponentRange>>::new();
 
 		for (pallet, extrinsic, components) in benchmarks_to_run {
+			log::info!(
+				"Starting benchmark: {}::{}",
+				String::from_utf8(pallet.clone()).expect("Encoded from String; qed"),
+				String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"),
+			);
 			let all_components = if components.is_empty() {
 				vec![Default::default()]
 			} else {
@@ -236,14 +241,22 @@ impl PalletCmd {
 					let lowest = self.lowest_range_values.get(idx).cloned().unwrap_or(*low);
 					let highest = self.highest_range_values.get(idx).cloned().unwrap_or(*high);
 
-					let diff = highest - lowest;
+					let diff =
+						highest.checked_sub(lowest).ok_or("`low` cannot be higher than `high`")?;
 
-					// Create up to `STEPS` steps for that component between high and low.
-					let step_size = (diff / self.steps).max(1);
-					let num_of_steps = diff / step_size + 1;
-					for s in 0..num_of_steps {
+					// The slope logic needs at least two points
+					// to compute a slope.
+					if self.steps < 2 {
+						return Err("`steps` must be at least 2.".into())
+					}
+
+					let step_size = (diff as f32 / (self.steps - 1) as f32).max(0.0);
+
+					for s in 0..self.steps {
 						// This is the value we will be testing for component `name`
-						let component_value = lowest + step_size * s;
+						let component_value = ((lowest as f32 + step_size * s as f32) as u32)
+							.min(highest)
+							.max(lowest);
 
 						// Select the max value for all the other components.
 						let c: Vec<(BenchmarkParameter, u32)> = components
@@ -364,13 +377,14 @@ impl PalletCmd {
 						if elapsed >= time::Duration::from_secs(5) {
 							timer = time::SystemTime::now();
 							log::info!(
-								"Running Benchmark: {}.{} {}/{} {}/{}",
+								"Running Benchmark: {}.{}({} args) {}/{} {}/{}",
 								String::from_utf8(pallet.clone())
 									.expect("Encoded from String; qed"),
 								String::from_utf8(extrinsic.clone())
 									.expect("Encoded from String; qed"),
-								s + 1, // s starts at 0. todo show step
-								self.steps,
+								components.len(),
+								s + 1, // s starts at 0.
+								all_components.len(),
 								r + 1,
 								self.external_repeat,
 							);
diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs
index 688ad4d3934f5..bf18e23367bc9 100644
--- a/utils/frame/benchmarking-cli/src/pallet/template.hbs
+++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs
@@ -15,7 +15,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::Weight};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight}};
 use sp_std::marker::PhantomData;
 
 /// Weight functions for `{{pallet}}`.
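To make the rendered output of the template hunk below concrete, a weight function generated from it would look roughly like the following; the extrinsic name and all numbers are invented for illustration:

```rust
// Hypothetical output of the updated template for one benchmark with a single component `s`.
fn set_members(s: u32) -> Weight {
	Weight::from_ref_time(17_000_000 as RefTimeWeight)
		// Standard Error: 5_000
		.saturating_add(Weight::from_ref_time(130_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
		.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
		.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
}
```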
@@ -33,22 +33,22 @@ impl {{pallet}}::WeightInfo for WeightInfo {
 	{{~#each benchmark.components as |c| ~}}
 	{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
 	) -> Weight {
-		({{underscore benchmark.base_weight}} as Weight)
+		Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight)
 		{{#each benchmark.component_weight as |cw|}}
 			// Standard Error: {{underscore cw.error}}
-			.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
+			.saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight))
 		{{/each}}
 		{{#if (ne benchmark.base_reads "0")}}
-			.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight))
+			.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight))
 		{{/if}}
 		{{#each benchmark.component_reads as |cr|}}
-			.saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight)))
+			.saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight)))
 		{{/each}}
 		{{#if (ne benchmark.base_writes "0")}}
-			.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight))
+			.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight))
 		{{/if}}
 		{{#each benchmark.component_writes as |cw|}}
-			.saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)))
+			.saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)))
 		{{/each}}
 	}
 	{{/each}}
diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs
index b8264dc009232..de5e189b40db0 100644
--- a/utils/frame/benchmarking-cli/src/storage/cmd.rs
+++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs
@@ -24,7 +24,7 @@ use sp_core::storage::StorageKey;
 use sp_database::{ColumnId, Database};
 use sp_runtime::traits::{Block as BlockT, HashFor};
 use sp_state_machine::Storage;
-use sp_storage::StateVersion;
+use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion};
 
 use clap::{Args, Parser};
 use log::info;
@@ -96,9 +96,15 @@ pub struct StorageParams {
 	#[clap(long, possible_values = ["0", "1"])]
 	pub state_version: u8,
 
-	/// State cache size.
-	#[clap(long, default_value = "0")]
-	pub state_cache_size: usize,
+	/// Trie cache size in bytes.
+	///
+	/// Providing `0` will disable the cache.
+	#[clap(long, default_value = "1024")]
+	pub trie_cache_size: usize,
+
+	/// Include child trees in benchmark.
+	#[clap(long)]
+	pub include_child_trees: bool,
 }
 
 impl StorageCmd {
@@ -155,6 +161,16 @@ impl StorageCmd {
 		}
 	}
 
+	/// Returns `Some(ChildInfo)` if the key belongs to a default child trie, and `None` for a regular key.
+	pub(crate) fn is_child_key(&self, key: Vec<u8>) -> Option<ChildInfo> {
+		if let Some((ChildType::ParentKeyId, storage_key)) =
+			ChildType::from_prefixed_key(&PrefixedStorageKey::new(key))
+		{
+			return Some(ChildInfo::new_default(storage_key))
+		}
+		None
+	}
+
 	/// Run some rounds of the (read) benchmark as warmup.
 	/// See `frame_benchmarking_cli::storage::read::bench_read` for detailed comments.
 	fn bench_warmup(&self, client: &Arc<C>) -> Result<()>
@@ -171,7 +187,7 @@ impl StorageCmd {
 		for i in 0..self.params.warmups {
 			info!("Warmup round {}/{}", i + 1, self.params.warmups);
-			for key in keys.clone() {
+			for key in keys.as_slice() {
 				let _ = client
 					.storage(&block, &key)
 					.expect("Checked above to exist")
@@ -197,7 +213,11 @@ impl CliConfiguration for StorageCmd {
 		Some(&self.pruning_params)
 	}
 
-	fn state_cache_size(&self) -> Result<usize> {
-		Ok(self.params.state_cache_size)
+	fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
+		if self.params.trie_cache_size == 0 {
+			Ok(None)
+		} else {
+			Ok(Some(self.params.trie_cache_size))
+		}
 	}
 }
diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs
index c1dc6daba0953..cba318f87ea98 100644
--- a/utils/frame/benchmarking-cli/src/storage/read.rs
+++ b/utils/frame/benchmarking-cli/src/storage/read.rs
@@ -50,16 +50,43 @@ impl StorageCmd {
 		let (mut rng, _) = new_rng(None);
 		keys.shuffle(&mut rng);
 
+		let mut child_nodes = Vec::new();
 		// Interesting part here:
 		// Read all the keys in the database and measure the time it takes to access each.
 		info!("Reading {} keys", keys.len());
-		for key in keys.clone() {
-			let start = Instant::now();
-			let v = client
-				.storage(&block, &key)
-				.expect("Checked above to exist")
-				.ok_or("Value unexpectedly empty")?;
-			record.append(v.0.len(), start.elapsed())?;
+		for key in keys.as_slice() {
+			match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
+				(true, Some(info)) => {
+					// child tree key
+					let child_keys = client.child_storage_keys(&block, &info, &empty_prefix)?;
+					for ck in child_keys {
+						child_nodes.push((ck.clone(), info.clone()));
+					}
+				},
+				_ => {
+					// regular key
+					let start = Instant::now();
+					let v = client
+						.storage(&block, &key)
+						.expect("Checked above to exist")
+						.ok_or("Value unexpectedly empty")?;
+					record.append(v.0.len(), start.elapsed())?;
+				},
+			}
+		}
+
+		if self.params.include_child_trees {
+			child_nodes.shuffle(&mut rng);
+
+			info!("Reading {} child keys", child_nodes.len());
+			for (key, info) in child_nodes.as_slice() {
+				let start = Instant::now();
+				let v = client
+					.child_storage(&block, info, key)
+					.expect("Checked above to exist")
+					.ok_or("Value unexpectedly empty")?;
+				record.append(v.0.len(), start.elapsed())?;
+			}
 		}
 		Ok(record)
 	}
diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs
index ab25109a35d49..9a3821a7095f8 100644
--- a/utils/frame/benchmarking-cli/src/storage/write.rs
+++ b/utils/frame/benchmarking-cli/src/storage/write.rs
@@ -16,8 +16,8 @@
 // limitations under the License.
 
 use sc_cli::Result;
-use sc_client_api::UsageProvider;
-use sc_client_db::{DbHash, DbState};
+use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
+use sc_client_db::{DbHash, DbState, DbStateBuilder};
 use sp_api::StateBackend;
 use sp_blockchain::HeaderBackend;
 use sp_database::{ColumnId, Transaction};
@@ -29,7 +29,12 @@ use sp_trie::PrefixedMemoryDB;
 
 use log::{info, trace};
 use rand::prelude::*;
-use std::{fmt::Debug, sync::Arc, time::Instant};
+use sp_storage::{ChildInfo, StateVersion};
+use std::{
+	fmt::Debug,
+	sync::Arc,
+	time::{Duration, Instant},
+};
 
 use super::cmd::StorageCmd;
 use crate::shared::{new_rng, BenchRecord};
@@ -37,7 +42,7 @@ use crate::shared::{new_rng, BenchRecord};
 impl StorageCmd {
 	/// Benchmarks the time it takes to write a single Storage item.
 	/// Uses the latest state that is available for the given client.
-	pub(crate) fn bench_write<Block, H, C>(
+	pub(crate) fn bench_write<Block, BA, H, C>(
 		&self,
 		client: Arc<C>,
 		(db, state_col): (Arc<dyn sp_database::Database<DbHash>>, ColumnId),
@@ -46,7 +51,8 @@ impl StorageCmd {
 	where
 		Block: BlockT