diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index ee03075176995..7180e7b509662 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -9,8 +9,13 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Check labels run: bash ${{ github.workspace }}/.maintain/github/check_labels.sh env: GITHUB_PR: ${{ github.event.pull_request.number }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} diff --git a/.gitignore b/.gitignore index ce302c74e10a0..0486a1a716e5c 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,4 @@ rls*.log .cargo/ .cargo-remote.toml *.bin +*.iml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a7eedee9aa24a..e9f17f54503f4 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -119,6 +119,11 @@ default: - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 +.nightly-pipeline: &nightly-pipeline + rules: + # this job runs only on nightly pipeline with the mentioned variable, against `master` branch + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" + #### stage: .pre skip-if-draft: @@ -199,17 +204,7 @@ test-prometheus-alerting-rules: cargo-deny: stage: test <<: *docker-env - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - changes: - - "Cargo.lock" - - "**/Cargo.toml" - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 + <<: *nightly-pipeline script: - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml after_script: @@ -277,6 +272,7 @@ test-linux-stable: &test-linux script: # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml + - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - sccache -s @@ -340,7 +336,7 @@ check-web-wasm: # Note: we don't need to test crates imported in `bin/node/cli` - time cargo build --manifest-path=client/consensus/aura/Cargo.toml --target=wasm32-unknown-unknown --features getrandom # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. 
- - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget + - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features --features=with-tracing @@ -411,7 +407,7 @@ test-browser-node: CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER: "wasm-bindgen-test-runner" WASM_BINDGEN_TEST_TIMEOUT: 120 script: - - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing -Z features=itarget + - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing build-linux-substrate: &build-binary stage: build @@ -523,6 +519,10 @@ build-rust-doc: - buildah info - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" - buildah push --format=v2s2 "$IMAGE_NAME:latest" + # pass artifacts to the trigget-simnet job + - echo "VERSION=${VERSION}" > build.env + - echo "TRIGGERER=${CI_PROJECT_NAME}" >> build.env + after_script: - buildah logout "$IMAGE_NAME" publish-docker-substrate: @@ -534,8 +534,6 @@ publish-docker-substrate: variables: <<: *docker-build-vars PRODUCT: substrate - after_script: - - echo "VERSION=${VERSION}" >> build.env artifacts: reports: # this artifact is used in trigger-simnet job @@ -653,9 +651,7 @@ deploy-prometheus-alerting-rules: trigger-simnet: stage: deploy - rules: - # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" + <<: *nightly-pipeline needs: - job: publish-docker-substrate trigger: diff --git a/.maintain/common/lib.sh 
b/.maintain/common/lib.sh index 1d4be0ecc7296..ce6c566d799ab 100755 --- a/.maintain/common/lib.sh +++ b/.maintain/common/lib.sh @@ -82,7 +82,7 @@ has_label(){ # Formats a message into a JSON string for posting to Matrix # message: 'any plaintext message' -# formatted_message: 'optional message formatted in html' +# formatted_message: 'optional message formatted in html' # Usage: structure_message $content $formatted_content (optional) structure_message() { if [ -z "$2" ]; then @@ -101,3 +101,17 @@ structure_message() { send_message() { curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" } + +# Check for runtime changes between two commits. This is defined as any changes +# to bin/node/src/runtime, frame/ and primitives/sr_* trees. +has_runtime_changes() { + from=$1 + to=$2 + if git diff --name-only "${from}...${to}" \ + | grep -q -e '^frame/' -e '^primitives/' + then + return 0 + else + return 1 + fi +} diff --git a/.maintain/github/check_labels.sh b/.maintain/github/check_labels.sh index 75190db6683fa..7b0aed9fe7345 100755 --- a/.maintain/github/check_labels.sh +++ b/.maintain/github/check_labels.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +set -e #shellcheck source=../common/lib.sh source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" @@ -24,10 +25,18 @@ releasenotes_labels=( ) criticality_labels=( - 'C1-low' - 'C3-medium' - 'C7-high' - 'C9-critical' + 'C1-low 📌' + 'C3-medium 📣' + 'C7-high ❗️' + 'C9-critical ‼️' +) + +audit_labels=( + 'D1-audited 👍' + 'D2-notlive 💤' + 'D3-trivial 🧸' + 'D5-nicetohaveaudit ⚠️' + 'D9-needsaudit 👮' ) echo "[+] Checking release notes (B) labels" @@ -46,4 +55,14 @@ else exit 1 fi +if has_runtime_changes origin/master "${HEAD_SHA}"; then + echo "[+] Runtime changes detected. Checking audit (D) labels" + if ensure_labels "${audit_labels[@]}"; then + echo "[+] Release audit label detected. All is well." + else + echo "[!] 
Release audit label not detected. Please add one of: ${audit_labels[*]}" + exit 1 + fi +fi + exit 0 diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index bf8fbf5aaf410..c1fd7365237de 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -86,10 +86,9 @@ else fi # Patch all Substrate crates in Polkadot -diener patch --crates-to-patch ../ --substrate +diener patch --crates-to-patch ../ --substrate --path Cargo.toml # Test Polkadot pr or master branch with this Substrate commit. -cargo update -p sp-io time cargo test --all --release --verbose --features=real-overseer cd parachain/test-parachains/adder/collator/ diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh index 4714baf54fb2f..e0412c7b7bec7 100755 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ b/.maintain/gitlab/check_polkadot_companion_status.sh @@ -56,7 +56,7 @@ fi boldprint "companion pr: #${pr_companion}" # check the status of that pull request - needs to be -# mergable and approved +# approved and mergable curl -H "${github_header}" -sS -o companion_pr.json \ ${github_api_polkadot_pull_url}/${pr_companion} @@ -64,20 +64,6 @@ curl -H "${github_header}" -sS -o companion_pr.json \ pr_head_sha=$(jq -r -e '.head.sha' < companion_pr.json) boldprint "Polkadot PR's HEAD SHA: $pr_head_sha" -if jq -e .merged < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} already merged" - exit 0 -fi - -if jq -e '.mergeable' < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} mergeable" -else - boldprint "polkadot pr #${pr_companion} not mergeable" - exit 1 -fi - curl -H "${github_header}" -sS -o companion_pr_reviews.json \ ${github_api_polkadot_pull_url}/${pr_companion}/reviews @@ -98,6 +84,19 @@ if [ -z "$(jq -r -e '.[].state | select(. 
== "APPROVED")' < companion_pr_reviews fi boldprint "polkadot pr #${pr_companion} state APPROVED" -exit 0 +if jq -e .merged < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} already merged" + exit 0 +fi + +if jq -e '.mergeable' < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} mergeable" +else + boldprint "polkadot pr #${pr_companion} not mergeable" + exit 1 +fi +exit 0 diff --git a/.maintain/gitlab/check_runtime.sh b/.maintain/gitlab/check_runtime.sh index 6d009c5aafc6a..af392e1b7d118 100755 --- a/.maintain/gitlab/check_runtime.sh +++ b/.maintain/gitlab/check_runtime.sh @@ -8,12 +8,13 @@ set -e # fail on any error - +#shellcheck source=../common/lib.sh +. "$(dirname "${0}")/../common/lib.sh" VERSIONS_FILE="bin/node/runtime/src/lib.rs" -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } +boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } +boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } github_label () { echo @@ -23,7 +24,7 @@ github_label () { -F "ref=master" \ -F "variables[LABEL]=${1}" \ -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ - ${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline + "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline" } @@ -31,16 +32,14 @@ boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}" git log --graph --oneline --decorate=short -n 10 boldprint "make sure the master branch and release tag are available in shallow clones" -git fetch --depth=${GIT_DEPTH:-100} origin master -git fetch --depth=${GIT_DEPTH:-100} origin release +git fetch --depth="${GIT_DEPTH:-100}" origin master +git fetch --depth="${GIT_DEPTH:-100}" origin release git tag -f release FETCH_HEAD git log -n1 release boldprint "check if the wasm sources changed" -if ! 
git diff --name-only origin/master...${CI_COMMIT_SHA} \ - | grep -v -e '^primitives/sr-arithmetic/fuzzer' \ - | grep -q -e '^bin/node/src/runtime' -e '^frame/' -e '^primitives/sr-' +if ! has_runtime_changes origin/master "${CI_COMMIT_SHA}" then boldcat <<-EOT @@ -57,9 +56,9 @@ fi # consensus-critical logic that has changed. the runtime wasm blobs must be # rebuilt. -add_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ +add_spec_version="$(git diff "tags/release...${CI_COMMIT_SHA}" "${VERSIONS_FILE}" \ | sed -n -r "s/^\+[[:space:]]+spec_version: +([0-9]+),$/\1/p")" -sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ +sub_spec_version="$(git diff "tags/release...${CI_COMMIT_SHA}" "${VERSIONS_FILE}" \ | sed -n -r "s/^\-[[:space:]]+spec_version: +([0-9]+),$/\1/p")" @@ -67,8 +66,6 @@ sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ if [ "${add_spec_version}" != "${sub_spec_version}" ] then - github_label "D2-breaksapi" - boldcat <<-EOT changes to the runtime sources and changes in the spec version. @@ -82,9 +79,9 @@ else # check for impl_version updates: if only the impl versions changed, we assume # there is no consensus-critical logic that has changed. 
- add_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ + add_impl_version="$(git diff "tags/release...${CI_COMMIT_SHA}" "${VERSIONS_FILE}" \ | sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p')" - sub_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ + sub_impl_version="$(git diff "tags/release...${CI_COMMIT_SHA}" "${VERSIONS_FILE}" \ | sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p')" diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 5ee2376677677..1aed87ad84f88 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -135,7 +135,7 @@ groups: - alert: ContinuousTaskEnded expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer"} == 1) - - on(instance, task_name) (polkadot_tasks_ended_total == 1)' + - on(instance, task_name) group_left() (polkadot_tasks_ended_total == 1)' for: 5m labels: severity: warning diff --git a/.maintain/node-template-release.sh b/.maintain/node-template-release.sh index 1a6c245320593..fd470a3dce17a 100755 --- a/.maintain/node-template-release.sh +++ b/.maintain/node-template-release.sh @@ -10,7 +10,7 @@ if [ "$#" -ne 1 ]; then exit 1 fi -PATH_TO_ARCHIVE=$(pwd)/$1 +PATH_TO_ARCHIVE=$1 cd $PROJECT_ROOT/.maintain/node-template-release cargo run $PROJECT_ROOT/bin/node-template $PATH_TO_ARCHIVE diff --git a/.maintain/node-template-release/Cargo.toml b/.maintain/node-template-release/Cargo.toml index dd3166d58ddf4..c1d9f2da7faea 100644 --- a/.maintain/node-template-release/Cargo.toml +++ b/.maintain/node-template-release/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-release" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" diff --git a/Cargo.lock b/Cargo.lock index 3c8900a790f3b..f038ec8bed2dc 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -23,9 +23,9 @@ dependencies = [ [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" +checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" [[package]] name = "arrayref" @@ -177,10 +177,11 @@ dependencies = [ [[package]] name = "assert_cmd" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc1679af9a1ab4bea16f228b05d18f8363f8327b1fa8db00d2760cfafc6b61e" +checksum = "f2475b58cd94eb4f70159f4fd8844ba3b807532fe3131b3373fae060bbe30396" dependencies = [ + "bstr", "doc-comment", "predicates", "predicates-core", @@ -190,9 +191,9 @@ dependencies = [ [[package]] name = "assert_matches" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-attributes" @@ -206,9 +207,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" dependencies = [ "concurrent-queue", "event-listener", @@ -285,13 +286,13 @@ dependencies = [ [[package]] name = "async-process" -version = "1.0.1" +version = "1.0.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" +checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b" dependencies = [ "async-io", "blocking", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "event-listener", "futures-lite", "once_cell", @@ -311,7 +312,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "futures-channel", "futures-core", "futures-io", @@ -322,12 +323,26 @@ dependencies = [ "memchr", "num_cpus", "once_cell", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", "pin-utils", "slab", "wasm-bindgen-futures", ] +[[package]] +name = "async-std-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665c56111e244fe38e7708ee10948a4356ad6a548997c21f5a63a0f4e0edc4d" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "trust-dns-resolver", +] + [[package]] name = "async-task" version = "4.0.3" @@ -336,9 +351,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.42" +version = "0.1.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +checksum = "7e098e9c493fdf92832223594d9a164f96bdf17ba81a42aff86f85c76768726a" dependencies = [ "proc-macro2", "quote", @@ -355,7 +370,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", ] [[package]] @@ -368,7 +383,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", ] [[package]] @@ -443,9 +458,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bincode" -version = "1.3.1" +version = "1.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" dependencies = [ "byteorder", "serde", @@ -483,12 +498,22 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bitvec" -version = "0.20.1" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium 0.3.0", +] + +[[package]] +name = "bitvec" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" +checksum = "1f682656975d3a682daff957be4ddeb65d6ad656737cd821f2d00685ae466af1" dependencies = [ "funty", - "radium", + "radium 0.6.2", "tap", "wyz", ] @@ -560,7 +585,7 @@ dependencies = [ "block-padding 0.1.5", "byte-tools", "byteorder", - "generic-array 0.12.3", + "generic-array 0.12.4", ] [[package]] @@ -619,9 +644,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" +checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" dependencies = [ "lazy_static", "memchr", @@ -640,9 +665,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099e596ef14349721d9016f6b80dd3419ea1bf289ab9b44df8e4dfd3a005d5d9" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" [[package]] name = "byte-slice-cast" @@ -658,9 +689,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" @@ -724,9 +755,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" dependencies = [ "jobserver", ] @@ -806,9 +837,9 @@ dependencies = [ [[package]] name = "cid" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d88f30b1e74e7063df5711496f3ee6e74a9735d62062242d70cddf77717f18e" +checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768" dependencies = [ "multibase", "multihash", @@ -887,12 +918,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "const_fn" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" - [[package]] name = "constant_time_eq" version = "0.1.5" @@ -1085,7 +1110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", ] [[package]] @@ -1106,8 +1131,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" 
dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.1", - "crossbeam-utils 0.8.1", + "crossbeam-epoch 0.9.3", + "crossbeam-utils 0.8.3", ] [[package]] @@ -1127,13 +1152,12 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if 1.0.0", - "const_fn", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "lazy_static", "memoffset 0.6.1", "scopeguard", @@ -1163,9 +1187,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -1184,7 +1208,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.12.4", "subtle 1.0.0", ] @@ -1200,9 +1224,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d58633299b24b515ac72a3f869f8b91306a3cec616a602843a383acd6f9e97" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", @@ -1231,9 +1255,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10bcb9d7dcbf7002aaffbb53eac22906b64cdcc127971dcc387d8eb7c95d5560" +checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19" dependencies = 
[ "quote", "syn", @@ -1325,7 +1349,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.12.4", ] [[package]] @@ -1456,6 +1480,18 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "enumflags2" version = "0.6.4" @@ -1491,9 +1527,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26ecb66b4bdca6c1409b40fb255eefc2bd4f6d135dab3c3124f80ffa2a9661e" +checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" dependencies = [ "atty", "humantime 2.1.0", @@ -1550,7 +1586,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", ] [[package]] @@ -1622,11 +1658,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "rand 0.8.3", ] @@ -1672,14 +1708,14 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" name = "fork-tree" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", 
] [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding 2.1.0", @@ -1694,7 +1730,7 @@ dependencies = [ "hex-literal", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "paste 1.0.4", "serde", "sp-api", @@ -1713,7 +1749,7 @@ dependencies = [ "chrono", "frame-benchmarking", "handlebars", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-cli", "sc-client-db", "sc-executor", @@ -1727,6 +1763,19 @@ dependencies = [ "structopt", ] +[[package]] +name = "frame-election-provider-support" +version = "3.0.0" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "sp-arithmetic", + "sp-npos-elections", + "sp-runtime", + "sp-std", +] + [[package]] name = "frame-executive" version = "3.0.0" @@ -1737,7 +1786,7 @@ dependencies = [ "pallet-balances", "pallet-indices", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -1751,7 +1800,7 @@ dependencies = [ name = "frame-metadata" version = "13.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-std", @@ -1768,7 +1817,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "paste 1.0.4", "pretty_assertions", @@ -1823,7 +1872,7 @@ dependencies = [ "frame-metadata", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "pretty_assertions", "rustversion", "serde", @@ -1844,7 +1893,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-externalities", @@ -1862,7 +1911,7 @@ 
dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -1874,7 +1923,7 @@ dependencies = [ name = "frame-system-rpc-runtime-api" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", ] @@ -1883,7 +1932,7 @@ name = "frame-try-runtime" version = "0.9.0" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-runtime", "sp-std", @@ -1891,9 +1940,9 @@ dependencies = [ [[package]] name = "fs-swap" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5839fda247e24ca4919c87c71dd5ca658f1f39e4f06829f80e3f15c3bafcfc2c" +checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" dependencies = [ "lazy_static", "libc", @@ -1901,6 +1950,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "fs_extra" version = "1.2.0" @@ -1937,15 +1996,15 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed" +checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" dependencies = [ "futures-channel", "futures-core", @@ -1958,9 +2017,9 @@ 
dependencies = [ [[package]] name = "futures-channel" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" dependencies = [ "futures-core", "futures-sink", @@ -1968,9 +2027,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" [[package]] name = "futures-cpupool" @@ -1978,7 +2037,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "num_cpus", ] @@ -1988,8 +2047,8 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "lazy_static", "log", "parking_lot 0.9.0", @@ -2000,9 +2059,9 @@ dependencies = [ [[package]] name = "futures-executor" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" dependencies = [ "futures-core", "futures-task", @@ -2012,9 +2071,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" +checksum = 
"d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" [[package]] name = "futures-lite" @@ -2027,15 +2086,15 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2056,18 +2115,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" [[package]] name = "futures-task" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" -dependencies = [ - "once_cell", -] +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" [[package]] name = "futures-timer" @@ -2087,11 +2143,11 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -2099,7 +2155,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -2114,18 +2170,18 @@ checksum = 
"8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] name = "generic-array" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" dependencies = [ "typenum", ] [[package]] name = "generic-array" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" +checksum = "f797e67af32588215eaaab8327027ee8e71b9dd0b2b26996aedf20c030fce309" dependencies = [ "typenum", ] @@ -2228,7 +2284,7 @@ dependencies = [ "byteorder", "bytes 0.4.12", "fnv", - "futures 0.1.30", + "futures 0.1.31", "http 0.1.21", "indexmap", "log", @@ -2265,9 +2321,9 @@ checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] name = "handlebars" -version = "3.5.2" +version = "3.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "964d0e99a61fe9b1b347389b77ebf8b7e1587b70293676aaca7d27e59b9073b2" +checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d" dependencies = [ "log", "pest", @@ -2321,9 +2377,9 @@ dependencies = [ [[package]] name = "hex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" @@ -2364,21 +2420,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ "digest 0.8.1", - "generic-array 0.12.3", + "generic-array 0.12.4", "hmac 0.7.1", ] [[package]] name = "honggfuzz" -version = "0.5.52" +version = "0.5.54" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ead88897bcad1c396806d6ccba260a0363e11da997472e9e19ab9889969083a2" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" dependencies = [ "arbitrary", "lazy_static", "memmap", ] +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi 0.3.9", +] + [[package]] name = "http" version = "0.1.21" @@ -2408,7 +2475,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "http 0.1.21", "tokio-buf", ] @@ -2425,9 +2492,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -2452,12 +2519,12 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.12.35" +version = "0.12.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" +checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "futures-cpupool", "h2 0.1.26", "http 0.1.21", @@ -2482,9 +2549,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.9" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = 
"8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ "bytes 0.5.6", "futures-channel", @@ -2496,8 +2563,8 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.4", - "socket2", + "pin-project 1.0.5", + "socket2 0.3.19", "tokio 0.2.25", "tower-service", "tracing", @@ -2513,7 +2580,7 @@ dependencies = [ "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.9", + "hyper 0.13.10", "log", "rustls 0.18.1", "rustls-native-certs", @@ -2535,9 +2602,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -2567,12 +2634,12 @@ dependencies = [ [[package]] name = "if-watch" -version = "0.1.8" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" +checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6" dependencies = [ "async-io", - "futures 0.3.12", + "futures 0.3.13", "futures-lite", "if-addrs", "ipnet", @@ -2587,7 +2654,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", ] [[package]] @@ -2612,9 +2679,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", "hashbrown", @@ -2648,7 +2715,7 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 2.0.2", ] @@ -2667,6 +2734,18 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi 0.3.9", + "winreg", +] + [[package]] name = "ipnet" version = "2.3.0" @@ -2708,9 +2787,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -2722,8 +2801,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" dependencies = [ "failure", - "futures 0.1.30", - "hyper 0.12.35", + "futures 0.1.31", + "hyper 0.12.36", "jsonrpc-core", "jsonrpc-pubsub", "log", @@ -2738,7 +2817,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "log", "serde", "serde_derive", @@ -2772,7 +2851,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" dependencies = [ - "hyper 0.12.35", + "hyper 0.12.36", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2845,8 +2924,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "124797a4ea7430d0675db78e065e53316e3f1a3cbf0ee4d6dbdd42db7b08e193" dependencies = [ "async-trait", - "futures 0.3.12", - "hyper 0.13.9", + "futures 0.3.13", + "hyper 0.13.10", "jsonrpsee-types", "jsonrpsee-utils", "log", @@ -2854,7 +2933,7 @@ dependencies = [ "serde_json", "thiserror", "unicase", - "url 2.2.0", + "url 2.2.1", ] [[package]] @@ -2876,7 +2955,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a8cd20c190e75dc56f7543b9d5713c3186351b301b5507ea6b85d8c403aac78" dependencies = [ "async-trait", - "futures 0.3.12", + "futures 0.3.13", "log", "serde", "serde_json", @@ -2890,9 +2969,9 @@ version = "0.2.0-alpha" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0e45394ec3175a767c3c5bac584560e6ad9b56ebd73216c85ec8bab49619244" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "globset", - "hyper 0.13.9", + "hyper 0.13.10", "jsonrpsee-types", "lazy_static", "log", @@ -2980,7 +3059,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1e98ba343d0b35f9009a8844cd2b87fa3192f7e79033ac05b00aeae0f3b0b5" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "js-sys", "kvdb", "kvdb-memorydb", @@ -3012,9 +3091,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.84" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" +checksum = "ba4aede83fc3617411dc6993bc8c70919750c1c257c6ca6a502aed6e0e2394ae" [[package]] name = "libloading" @@ -3034,13 +3113,13 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.35.1" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" +checksum = "fe5759b526f75102829c15e4d8566603b4bf502ed19b5f35920d98113873470d" dependencies = [ "atomic", "bytes 1.0.1", - "futures 0.3.12", + "futures 0.3.13", "lazy_static", "libp2p-core", "libp2p-deflate", @@ -3055,6 +3134,7 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", + "libp2p-relay", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", @@ -3065,23 +3145,23 @@ dependencies = [ "libp2p-yamux", "parity-multiaddr", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "smallvec 1.6.1", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.27.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" +checksum = "c1e1797734bbd4c453664fefb029628f77c356ffc5bce98f06b18a7db3ebb0f7" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", @@ -3090,7 +3170,7 @@ dependencies = [ "multistream-select", "parity-multiaddr", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "prost", "prost-build", "rand 0.7.3", @@ -3106,35 +3186,38 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" +checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" dependencies = [ "flate2", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" +checksum = "9712eb3e9f7dcc77cc5ca7d943b6a85ce4b1faaf91a67e003442412a26d6d6f8" 
dependencies = [ - "futures 0.3.12", + "async-std-resolver", + "futures 0.3.13", "libp2p-core", "log", + "smallvec 1.6.1", + "trust-dns-resolver", ] [[package]] name = "libp2p-floodsub" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" +checksum = "897645f99e9b396df256a6aa8ba8c4bc019ac6b7c62556f624b5feea9acc82bb" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3146,16 +3229,16 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" +checksum = "794b0c85f5df1acbc1fc38414d37272594811193b6325c76d3931c3e3f5df8c0" dependencies = [ "asynchronous-codec 0.6.0", "base64 0.13.0", "byteorder", "bytes 1.0.1", "fnv", - "futures 0.3.12", + "futures 0.3.13", "hex_fmt", "libp2p-core", "libp2p-swarm", @@ -3172,11 +3255,11 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" +checksum = "f88ebc841d744979176ab4b8b294a3e655a7ba4ef26a905d073a52b49ed4dff5" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3188,16 +3271,16 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" +checksum = "bbb5b90b6bda749023a85f60b49ea74b387c25f17d8df541ae72a3c75dd52e63" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec 0.6.0", "bytes 1.0.1", "either", "fnv", - "futures 0.3.12", + "futures 0.3.13", 
"libp2p-core", "libp2p-swarm", "log", @@ -3214,34 +3297,34 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" +checksum = "be28ca13bb648d249a9baebd750ebc64ce7040ddd5f0ce1035ff1f4549fb596d" dependencies = [ "async-io", "data-encoding", "dns-parser", - "futures 0.3.12", + "futures 0.3.13", "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec 1.6.1", - "socket2", + "socket2 0.4.0", "void", ] [[package]] name = "libp2p-mplex" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" +checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", "nohash-hasher", @@ -3253,13 +3336,13 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" +checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd" dependencies = [ "bytes 1.0.1", "curve25519-dalek 3.0.2", - "futures 0.3.12", + "futures 0.3.13", "lazy_static", "libp2p-core", "log", @@ -3275,11 +3358,11 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" +checksum = "dea10fc5209260915ea65b78f612d7ff78a29ab288e7aa3250796866af861c45" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3290,13 +3373,13 @@ 
dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d58defcadb646ae4b033e130b48d87410bf76394dc3335496cae99dac803e61" +checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", "prost", @@ -3311,23 +3394,46 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", - "pin-project 1.0.4", + "pin-project 1.0.5", "rand 0.7.3", "salsa20", "sha3", ] +[[package]] +name = "libp2p-relay" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff268be6a9d6f3c6cca3b81bbab597b15217f9ad8787c6c40fc548c1af7cd24" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures 0.3.13", + "futures-timer 3.0.2", + "libp2p-core", + "libp2p-swarm", + "log", + "pin-project 1.0.5", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + [[package]] name = "libp2p-request-response" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" +checksum = "725367dd2318c54c5ab1a6418592e5b01c63b0dedfbbfb8389220b2bcf691899" dependencies = [ "async-trait", "bytes 1.0.1", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3341,12 +3447,12 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" +checksum = 
"75c26980cadd7c25d89071cb23e1f7f5df4863128cc91d83c6ddc72338cecafa" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", "rand 0.7.3", @@ -3367,40 +3473,40 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" +checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9" dependencies = [ "async-io", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "if-watch", "ipnet", "libc", "libp2p-core", "log", - "socket2", + "socket2 0.4.0", ] [[package]] name = "libp2p-uds" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" +checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" dependencies = [ "async-std", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6149c46cb76935c80bc8be6ec6e3ebd5f5e1679765a255fb34331d54610f15dd" +checksum = "6df65fc13f6188edf7e6927b086330448b3ca27af86b49748c6d299d7c8d9040" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3410,29 +3516,29 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" +checksum = "cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "futures-rustls", "libp2p-core", "log", "quicksink", "rw-stream-sink", "soketto", - "url 2.2.0", + "url 2.2.1", 
"webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" +checksum = "96d6144cc94143fb0a8dd1e7c2fbcc32a2808168bcd1d69920635424d5993b7b" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "parking_lot 0.11.1", "thiserror", @@ -3558,6 +3664,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "mach" version = "0.3.2" @@ -3573,6 +3688,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -3621,9 +3742,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73be3b7d04a0123e933fea1d50d126cc7196bbc0362c0ce426694f777194eee" +checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" dependencies = [ "libc", ] @@ -3677,18 +3798,18 @@ dependencies = [ [[package]] name = "minicbor" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3265a9f5210bb726f81ef9c456ae0aff5321cd95748c0e71889b0e19d8f0332b" +checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.6.1" +version = "0.6.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130b9455e28a3f308f6579671816a6f2621e2e0cbf55dc2f886345bef699481e" +checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" dependencies = [ "proc-macro2", "quote", @@ -3697,9 +3818,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", @@ -3777,7 +3898,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "socket2", + "socket2 0.3.19", "winapi 0.3.9", ] @@ -3837,16 +3958,16 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10ddc0eb0117736f19d556355464fc87efc8ad98b29e3fd84f02531eb6e90840" +checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" dependencies = [ "bytes 1.0.1", - "futures 0.3.12", + "futures 0.3.13", "log", - "pin-project 1.0.4", + "pin-project 1.0.5", "smallvec 1.6.1", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", ] [[package]] @@ -3856,7 +3977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" dependencies = [ "approx", - "generic-array 0.13.2", + "generic-array 0.13.3", "matrixmultiply", "num-complex", "num-rational", @@ -3883,7 +4004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f" dependencies = [ "libc", - "socket2", + 
"socket2 0.3.19", ] [[package]] @@ -3915,7 +4036,7 @@ version = "0.8.0" dependencies = [ "derive_more", "fs_extra", - "futures 0.3.12", + "futures 0.3.13", "hash-db", "hex", "kvdb", @@ -3951,7 +4072,7 @@ dependencies = [ name = "node-browser-testing" version = "2.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "jsonrpc-core", "libp2p", @@ -3973,7 +4094,7 @@ dependencies = [ "frame-benchmarking-cli", "frame-support", "frame-system", - "futures 0.3.12", + "futures 0.3.13", "hex-literal", "libp2p-wasm-ext", "log", @@ -3992,7 +4113,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "platforms", "rand 0.7.3", @@ -4052,6 +4173,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "futures 0.3.13", "node-primitives", "node-runtime", "node-testing", @@ -4064,7 +4186,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-executor", "sp-application-crypto", "sp-consensus-babe", @@ -4086,7 +4208,7 @@ version = "0.8.0" dependencies = [ "derive_more", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-cli", "sc-client-api", "sc-service", @@ -4101,7 +4223,7 @@ name = "node-primitives" version = "2.0.0" dependencies = [ "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "pretty_assertions", "sp-application-crypto", "sp-core", @@ -4145,8 +4267,8 @@ dependencies = [ name = "node-rpc-client" version = "2.0.0" dependencies = [ - "futures 0.1.30", - "hyper 0.12.35", + "futures 0.1.31", + "hyper 0.12.36", "jsonrpc-core-client", "log", "node-primitives", @@ -4209,7 +4331,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-authority-discovery", @@ -4219,6 +4341,7 @@ dependencies = [ 
"sp-inherents", "sp-io", "sp-keyring", + "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-session", @@ -4232,7 +4355,7 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", @@ -4269,7 +4392,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -4287,7 +4410,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-block-builder", @@ -4311,7 +4434,7 @@ dependencies = [ "frame-support", "frame-system", "fs_extra", - "futures 0.3.12", + "futures 0.3.13", "log", "node-executor", "node-primitives", @@ -4326,7 +4449,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-block-builder", "sc-cli", "sc-client-api", @@ -4444,9 +4567,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.5.2" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" dependencies = [ "parking_lot 0.11.1", ] @@ -4501,7 +4624,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4516,7 +4639,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4533,7 +4656,7 @@ dependencies = [ "lazy_static", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "serde", "sp-application-crypto", @@ -4551,7 
+4674,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-application-crypto", "sp-authority-discovery", @@ -4569,7 +4692,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-authorship", "sp-core", @@ -4584,6 +4707,7 @@ name = "pallet-babe" version = "3.0.0" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "log", @@ -4594,13 +4718,12 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-application-crypto", "sp-consensus-babe", "sp-consensus-vrf", "sp-core", - "sp-election-providers", "sp-io", "sp-runtime", "sp-session", @@ -4617,7 +4740,7 @@ dependencies = [ "frame-system", "log", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4634,7 +4757,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4653,7 +4776,7 @@ dependencies = [ "hex-literal", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4676,7 +4799,7 @@ dependencies = [ "pallet-contracts-proc-macro", "pallet-randomness-collective-flip", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm 0.41.0", "paste 1.0.4", "pretty_assertions", @@ -4698,7 +4821,7 @@ name = "pallet-contracts-primitives" version = "3.0.0" dependencies = [ "bitflags", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-runtime", "sp-std", ] @@ -4721,7 +4844,7 @@ dependencies = [ "jsonrpc-derive", "pallet-contracts-primitives", "pallet-contracts-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", 
"serde_json", "sp-api", @@ -4736,7 +4859,7 @@ name = "pallet-contracts-rpc-runtime-api" version = "3.0.0" dependencies = [ "pallet-contracts-primitives", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-runtime", "sp-std", @@ -4752,7 +4875,7 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-scheduler", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4767,19 +4890,19 @@ name = "pallet-election-provider-multi-phase" version = "3.0.0" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "hex-literal", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "paste 1.0.4", "rand 0.7.3", "serde", "sp-arithmetic", "sp-core", - "sp-election-providers", "sp-io", "sp-npos-elections", "sp-runtime", @@ -4797,7 +4920,7 @@ dependencies = [ "frame-system", "hex-literal", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4815,7 +4938,7 @@ dependencies = [ "hex-literal", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4827,13 +4950,14 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4849,7 +4973,7 @@ dependencies = [ "frame-system", "lite-json", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4864,7 +4988,7 @@ version = "2.0.1" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4881,7 +5005,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", 
"sp-arithmetic", "sp-core", @@ -4896,6 +5020,7 @@ version = "3.0.0" dependencies = [ "finality-grandpa", "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "log", @@ -4906,11 +5031,10 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-application-crypto", "sp-core", - "sp-election-providers", "sp-finality-grandpa", "sp-io", "sp-keyring", @@ -4929,7 +5053,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4947,7 +5071,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-application-crypto", "sp-core", @@ -4965,7 +5089,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4983,7 +5107,7 @@ dependencies = [ "frame-support-test", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4997,7 +5121,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5010,13 +5134,13 @@ name = "pallet-mmr" version = "3.0.0" dependencies = [ "ckb-merkle-mountain-range", - "env_logger 0.8.2", + "env_logger 0.8.3", "frame-benchmarking", "frame-support", "frame-system", "hex-literal", "pallet-mmr-primitives", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5032,7 +5156,7 @@ dependencies = [ "frame-system", "hex-literal", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-core", @@ -5048,7 +5172,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-mmr-primitives", - "parity-scale-codec", + 
"parity-scale-codec 2.0.1", "serde", "serde_json", "sp-api", @@ -5066,7 +5190,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5081,7 +5205,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5091,12 +5215,12 @@ dependencies = [ [[package]] name = "pallet-node-authorization" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5112,7 +5236,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5126,6 +5250,7 @@ name = "pallet-offences-benchmarking" version = "3.0.0" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "pallet-babe", @@ -5137,10 +5262,9 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", - "sp-election-providers", "sp-io", "sp-runtime", "sp-staking", @@ -5156,7 +5280,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-utility", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5170,7 +5294,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "safe-mix", "serde", "sp-core", @@ -5187,7 +5311,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5203,7 +5327,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5219,7 +5343,7 @@ 
dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5236,7 +5360,7 @@ dependencies = [ "impl-trait-for-tuples", "lazy_static", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-application-crypto", "sp-core", @@ -5253,6 +5377,7 @@ name = "pallet-session-benchmarking" version = "3.0.0" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "pallet-balances", @@ -5260,11 +5385,10 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "serde", "sp-core", - "sp-election-providers", "sp-io", "sp-runtime", "sp-session", @@ -5279,7 +5403,7 @@ dependencies = [ "frame-support-test", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand_chacha 0.2.2", "serde", "sp-core", @@ -5293,6 +5417,7 @@ name = "pallet-staking" version = "3.0.0" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "hex", @@ -5302,15 +5427,14 @@ dependencies = [ "pallet-session", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", + "paste 1.0.4", "rand_chacha 0.2.2", "serde", "sp-application-crypto", "sp-core", - "sp-election-providers", "sp-io", - "sp-npos-elections", "sp-runtime", "sp-staking", "sp-std", @@ -5320,29 +5444,6 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "pallet-staking-fuzz" -version = "0.0.0" -dependencies = [ - "frame-support", - "frame-system", - "honggfuzz", - "pallet-balances", - "pallet-indices", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", - "parity-scale-codec", - "serde", - "sp-core", - "sp-election-providers", - "sp-io", - "sp-npos-elections", - 
"sp-runtime", - "sp-std", -] - [[package]] name = "pallet-staking-reward-curve" version = "3.0.0" @@ -5354,13 +5455,21 @@ dependencies = [ "syn", ] +[[package]] +name = "pallet-staking-reward-fn" +version = "3.0.0" +dependencies = [ + "log", + "sp-arithmetic", +] + [[package]] name = "pallet-sudo" version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5370,12 +5479,12 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5391,7 +5500,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-inherents", @@ -5410,7 +5519,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5426,7 +5535,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "serde_json", "smallvec 1.6.1", @@ -5445,7 +5554,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-blockchain", "sp-core", @@ -5458,7 +5567,7 @@ name = "pallet-transaction-payment-rpc-runtime-api" version = "3.0.0" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-runtime", ] @@ -5472,7 +5581,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5489,7 +5598,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec", + 
"parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5507,7 +5616,7 @@ dependencies = [ "frame-system", "hex-literal", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -5518,12 +5627,13 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111e193c96758d476d272093a853882668da17489f76bf4361b8decae0b6c515" +checksum = "495197c078e54b8735181aa35c00a327f7f3a3cc00a1ee8c95926dd010f0ec6b" dependencies = [ "blake2-rfc", "crc32fast", + "fs2", "hex", "libc", "log", @@ -5534,9 +5644,9 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c6805f98667a3828afb2ec2c396a8d610497e8d546f5447188aae47c5a79ec" +checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" dependencies = [ "arrayref", "bs58", @@ -5547,27 +5657,39 @@ dependencies = [ "serde", "static_assertions", "unsigned-varint 0.7.0", - "url 2.2.0", + "url 2.2.1", ] [[package]] name = "parity-scale-codec" -version = "2.0.0" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c823fdae1bb5ff5708ee61a62697e6296175dc671710876871c853f48592b3" +checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" dependencies = [ "arrayvec 0.5.2", - "bitvec", - "byte-slice-cast", + "bitvec 0.17.4", + "byte-slice-cast 0.3.5", + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +dependencies = [ + "arrayvec 0.5.2", + "bitvec 0.20.2", + "byte-slice-cast 1.0.0", "parity-scale-codec-derive", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "2.0.0" +version = "2.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9029e65297c7fd6d7013f0579e193ec2b34ae78eabca854c9417504ad8a2d214" +checksum = "fa04976a81fde04924b40cc4036c4d12841e8bb04325a5cf2ada75731a150a7d" dependencies = [ "proc-macro-crate 0.1.5", "proc-macro2", @@ -5588,7 +5710,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "libc", "log", "mio-named-pipes", @@ -5657,7 +5779,7 @@ dependencies = [ "rand 0.7.3", "sha-1 0.8.2", "slab", - "url 2.2.0", + "url 2.2.1", ] [[package]] @@ -5695,7 +5817,7 @@ checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api 0.4.2", - "parking_lot_core 0.8.2", + "parking_lot_core 0.8.3", ] [[package]] @@ -5729,14 +5851,14 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.1.57", + "redox_syscall 0.2.5", "smallvec 1.6.1", "winapi 0.3.9", ] @@ -5873,11 +5995,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.4", + "pin-project-internal 1.0.5", ] [[package]] @@ -5893,9 +6015,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -5910,9 +6032,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cf491442e4b033ed1c722cb9f0df5fcfcf4de682466c46469c36bc47dc5548a" +checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" [[package]] name = "pin-utils" @@ -6228,7 +6350,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger 0.8.2", + "env_logger 0.8.3", "log", "rand 0.8.3", ] @@ -6246,13 +6368,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + [[package]] name = "radium" version = "0.6.2" @@ -6423,7 +6551,7 @@ checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel", "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "lazy_static", "num_cpus", ] @@ -6445,9 +6573,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" dependencies = [ "bitflags", ] @@ -6470,7 +6598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ "getrandom 0.2.2", - "redox_syscall 0.2.4", + "redox_syscall 0.2.5", ] [[package]] @@ -6549,16 +6677,17 @@ dependencies = [ name = "remote-externalities" version = "0.9.0" dependencies = [ - "async-std", - "env_logger 0.8.2", + "env_logger 0.8.3", "hex-literal", "jsonrpsee-http-client", "jsonrpsee-proc-macros", "jsonrpsee-types", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-io", + "sp-runtime", + "tokio 0.2.25", ] [[package]] @@ -6570,6 +6699,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error 1.2.3", +] + [[package]] name = "retain_mut" version = "0.1.2" @@ -6578,9 +6717,9 @@ checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -6620,7 +6759,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", ] [[package]] @@ -6700,7 +6839,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.12", + "futures 
0.3.13", "pin-project 0.4.27", "static_assertions", ] @@ -6745,11 +6884,11 @@ dependencies = [ "async-trait", "derive_more", "either", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "prost", "prost-build", "quickcheck", @@ -6773,10 +6912,10 @@ dependencies = [ name = "sc-basic-authorship" version = "0.9.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -6798,7 +6937,7 @@ dependencies = [ name = "sc-block-builder" version = "0.9.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sp-api", "sp-block-builder", @@ -6816,7 +6955,7 @@ name = "sc-chain-spec" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-chain-spec-derive", "sc-consensus-babe", "sc-consensus-epochs", @@ -6847,12 +6986,12 @@ version = "0.9.0" dependencies = [ "chrono", "fdlimit", - "futures 0.3.12", + "futures 0.3.13", "hex", "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "regex", "rpassword", @@ -6885,13 +7024,13 @@ version = "3.0.0" dependencies = [ "derive_more", "fnv", - "futures 0.3.12", + "futures 0.3.13", "hash-db", "kvdb", "kvdb-memorydb", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-executor", "sp-api", @@ -6928,7 +7067,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "quickcheck", @@ -6954,6 +7093,7 @@ dependencies = [ name = "sc-consensus" version = "0.9.0" dependencies = [ + "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", "sp-consensus", @@ -6964,12 +7104,13 @@ dependencies = [ name = "sc-consensus-aura" version = "0.9.0" 
dependencies = [ + "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "getrandom 0.2.2", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -7005,16 +7146,17 @@ dependencies = [ name = "sc-consensus-babe" version = "0.9.0" dependencies = [ + "async-trait", "derive_more", "fork-tree", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", "merlin", "num-bigint", "num-rational", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "pdqselect", "rand 0.7.3", @@ -7061,7 +7203,7 @@ name = "sc-consensus-babe-rpc" version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7090,9 +7232,9 @@ name = "sc-consensus-epochs" version = "0.9.0" dependencies = [ "fork-tree", - "parity-scale-codec", - "parking_lot 0.11.1", + "parity-scale-codec 2.0.1", "sc-client-api", + "sc-consensus", "sp-blockchain", "sp-runtime", ] @@ -7102,13 +7244,14 @@ name = "sc-consensus-manual-seal" version = "0.9.0" dependencies = [ "assert_matches", + "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-basic-authorship", "sc-client-api", @@ -7139,11 +7282,12 @@ dependencies = [ name = "sc-consensus-pow" version = "0.9.0" dependencies = [ + "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-client-api", "sp-api", @@ -7162,11 +7306,11 @@ dependencies = [ name = "sc-consensus-slots" version = "0.9.0" dependencies = [ - "futures 0.3.12", + "async-trait", + "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec", - "parking_lot 0.11.1", + 
"parity-scale-codec 2.0.1", "sc-client-api", "sc-telemetry", "sp-api", @@ -7179,6 +7323,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-timestamp", "sp-trie", "substrate-test-runtime-client", "thiserror", @@ -7207,7 +7352,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm 0.41.0", "parking_lot 0.11.1", "paste 1.0.4", @@ -7242,7 +7387,7 @@ name = "sc-executor-common" version = "0.9.0" dependencies = [ "derive_more", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm 0.41.0", "sp-allocator", "sp-core", @@ -7257,7 +7402,7 @@ name = "sc-executor-wasmi" version = "0.9.0" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-executor-common", "sp-allocator", "sp-core", @@ -7272,7 +7417,7 @@ version = "0.9.0" dependencies = [ "assert_matches", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm 0.41.0", "pwasm-utils 0.14.0", "sc-executor-common", @@ -7289,17 +7434,18 @@ name = "sc-finality-grandpa" version = "0.9.0" dependencies = [ "assert_matches", + "async-trait", "derive_more", "dyn-clone", "finality-grandpa", "fork-tree", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -7337,14 +7483,14 @@ version = "0.9.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-block-builder", "sc-client-api", "sc-finality-grandpa", @@ -7367,10 +7513,10 @@ version = "0.9.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.12", + "futures 0.3.13", "log", "num-traits", - "parity-scale-codec", + 
"parity-scale-codec 2.0.1", "parking_lot 0.11.1", "prost", "rand 0.8.3", @@ -7392,7 +7538,7 @@ name = "sc-informant" version = "0.9.0" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.12", + "futures 0.3.13", "log", "parity-util-mem", "sc-client-api", @@ -7410,7 +7556,7 @@ version = "3.0.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "futures-util", "hex", "merlin", @@ -7430,7 +7576,7 @@ version = "3.0.0" dependencies = [ "hash-db", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-client-api", "sc-executor", @@ -7459,7 +7605,7 @@ dependencies = [ "erased-serde", "fnv", "fork-tree", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "hex", "ip_network", @@ -7469,9 +7615,9 @@ dependencies = [ "log", "lru", "nohash-hasher", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "prost", "prost-build", "quickcheck", @@ -7507,7 +7653,7 @@ name = "sc-network-gossip" version = "0.9.0" dependencies = [ "async-std", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", @@ -7518,6 +7664,7 @@ dependencies = [ "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", + "tracing", "wasm-timer", ] @@ -7526,7 +7673,8 @@ name = "sc-network-test" version = "0.8.0" dependencies = [ "async-std", - "futures 0.3.12", + "async-trait", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", @@ -7554,15 +7702,15 @@ version = "3.0.0" dependencies = [ "bytes 0.5.6", "fnv", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "hex", - "hyper 0.13.9", + "hyper 0.13.10", "hyper-rustls", "lazy_static", "log", "num_cpus", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "rand 0.7.3", "sc-block-builder", @@ -7588,7 +7736,7 @@ dependencies = [ name = "sc-peerset" version = "3.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", 
"libp2p", "log", "rand 0.7.3", @@ -7610,14 +7758,14 @@ name = "sc-rpc" version = "3.0.0" dependencies = [ "assert_matches", - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-block-builder", "sc-cli", @@ -7652,13 +7800,13 @@ name = "sc-rpc-api" version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "serde", "serde_json", @@ -7674,7 +7822,7 @@ dependencies = [ name = "sc-rpc-server" version = "3.0.0" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", @@ -7706,20 +7854,21 @@ name = "sc-service" version = "0.9.0" dependencies = [ "async-std", + "async-trait", "directories", "exit-future", - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -7775,11 +7924,11 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "hex-literal", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -7811,7 +7960,7 @@ name = "sc-state-db" version = "0.9.0" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.11.1", @@ -7844,11 +7993,11 @@ name = "sc-telemetry" version = "3.0.0" dependencies = [ "chrono", - 
"futures 0.3.12", + "futures 0.3.13", "libp2p", "log", "parking_lot 0.11.1", - "pin-project 1.0.4", + "pin-project 1.0.5", "rand 0.7.3", "serde", "serde_json", @@ -7901,10 +8050,10 @@ dependencies = [ "assert_matches", "criterion", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "retain_mut", @@ -7924,12 +8073,12 @@ name = "sc-transaction-pool" version = "3.0.0" dependencies = [ "assert_matches", - "futures 0.3.12", + "futures 0.3.13", "futures-diagnose", "hex", "intervalier", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "sc-block-builder", @@ -8111,9 +8260,9 @@ checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" [[package]] name = "serde" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" +checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" dependencies = [ "serde_derive", ] @@ -8130,9 +8279,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" +checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" dependencies = [ "proc-macro2", "quote", @@ -8141,9 +8290,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.61" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -8164,9 +8313,9 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.9.2" +version = 
"0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", @@ -8229,9 +8378,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook" -version = "0.1.17" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" +checksum = "8a7f3f92a1da3d6b1d32245d0cbcbbab0cfc45996d8df619c42bccfa6d2bbb5f" dependencies = [ "libc", "signal-hook-registry", @@ -8314,6 +8463,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "socket2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "soketto" version = "0.4.2" @@ -8323,11 +8482,11 @@ dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.12", + "futures 0.3.13", "httparse", "log", "rand 0.7.3", - "sha-1 0.9.2", + "sha-1 0.9.4", ] [[package]] @@ -8347,7 +8506,7 @@ version = "3.0.0" dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -8375,7 +8534,7 @@ version = "2.0.1" dependencies = [ "criterion", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rustversion", "sc-block-builder", "sp-api", @@ -8394,7 +8553,7 @@ dependencies = [ name = "sp-application-crypto" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -8420,7 +8579,7 @@ dependencies = [ "criterion", "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "primitive-types", "rand 0.7.3", 
"serde", @@ -8444,7 +8603,7 @@ dependencies = [ name = "sp-authority-discovery" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-application-crypto", "sp-runtime", @@ -8455,7 +8614,7 @@ dependencies = [ name = "sp-authorship" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-inherents", "sp-runtime", "sp-std", @@ -8465,7 +8624,7 @@ dependencies = [ name = "sp-block-builder" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-inherents", "sp-runtime", @@ -8476,10 +8635,10 @@ dependencies = [ name = "sp-blockchain" version = "3.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "lru", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-api", "sp-consensus", @@ -8501,11 +8660,12 @@ dependencies = [ name = "sp-consensus" version = "0.9.0" dependencies = [ - "futures 0.3.12", + "async-trait", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "serde", "sp-api", @@ -8527,9 +8687,10 @@ dependencies = [ name = "sp-consensus-aura" version = "0.9.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-application-crypto", + "sp-consensus", "sp-consensus-slots", "sp-inherents", "sp-runtime", @@ -8542,7 +8703,7 @@ name = "sp-consensus-babe" version = "0.9.0" dependencies = [ "merlin", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-application-crypto", @@ -8561,7 +8722,7 @@ dependencies = [ name = "sp-consensus-pow" version = "0.9.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-core", "sp-runtime", @@ -8572,7 +8733,7 @@ dependencies = [ name = "sp-consensus-slots" version = "0.9.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-arithmetic", "sp-runtime", ] @@ -8581,7 +8742,7 
@@ dependencies = [ name = "sp-consensus-vrf" version = "0.9.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "schnorrkel", "sp-core", "sp-runtime", @@ -8598,7 +8759,7 @@ dependencies = [ "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.12", + "futures 0.3.13", "hash-db", "hash256-std-hasher", "hex", @@ -8609,7 +8770,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "pretty_assertions", @@ -8654,23 +8815,12 @@ dependencies = [ "syn", ] -[[package]] -name = "sp-election-providers" -version = "3.0.0" -dependencies = [ - "parity-scale-codec", - "sp-arithmetic", - "sp-npos-elections", - "sp-runtime", - "sp-std", -] - [[package]] name = "sp-externalities" version = "0.9.0" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "sp-storage", ] @@ -8681,7 +8831,7 @@ version = "3.0.0" dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-application-crypto", @@ -8695,7 +8845,7 @@ dependencies = [ name = "sp-inherents" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-core", "sp-std", @@ -8706,11 +8856,11 @@ dependencies = [ name = "sp-io" version = "3.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-core", "sp-externalities", @@ -8741,9 +8891,9 @@ version = "0.9.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "merlin", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "rand 0.7.3", "rand_chacha 0.2.2", @@ -8757,7 +8907,7 @@ dependencies = [ name = "sp-npos-elections" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "serde", 
"sp-arithmetic", @@ -8772,10 +8922,14 @@ dependencies = [ name = "sp-npos-elections-compact" version = "3.0.0" dependencies = [ + "parity-scale-codec 2.0.1", "proc-macro-crate 1.0.0", "proc-macro2", "quote", + "sp-arithmetic", + "sp-npos-elections", "syn", + "trybuild", ] [[package]] @@ -8783,7 +8937,7 @@ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ "honggfuzz", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "sp-arithmetic", "sp-npos-elections", @@ -8826,7 +8980,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "paste 1.0.4", "rand 0.7.3", @@ -8848,7 +9002,7 @@ name = "sp-runtime-interface" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "primitive-types", "rustversion", "sp-core", @@ -8919,7 +9073,7 @@ name = "sp-sandbox" version = "0.9.0" dependencies = [ "assert_matches", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-io", "sp-std", @@ -8940,7 +9094,7 @@ dependencies = [ name = "sp-session" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-core", "sp-runtime", @@ -8952,7 +9106,7 @@ dependencies = [ name = "sp-staking" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-runtime", "sp-std", ] @@ -8965,7 +9119,7 @@ dependencies = [ "hex-literal", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "pretty_assertions", "rand 0.7.3", @@ -8990,7 +9144,7 @@ name = "sp-storage" version = "3.0.0" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "ref-cast", "serde", "sp-debug-derive", @@ -9002,7 +9156,7 @@ name = "sp-tasks" version = "3.0.0" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-externalities", "sp-io", @@ 
-9014,7 +9168,7 @@ dependencies = [ name = "sp-test-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "serde", "sp-application-crypto", @@ -9026,7 +9180,7 @@ dependencies = [ name = "sp-timestamp" version = "3.0.0" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-inherents", "sp-runtime", @@ -9039,7 +9193,7 @@ name = "sp-tracing" version = "3.0.0" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "tracing", "tracing-core", @@ -9051,9 +9205,9 @@ name = "sp-transaction-pool" version = "3.0.0" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-blockchain", @@ -9069,7 +9223,7 @@ dependencies = [ "hash-db", "hex-literal", "memory-db", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-runtime", "sp-std", @@ -9083,7 +9237,7 @@ dependencies = [ name = "sp-utils" version = "3.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -9095,7 +9249,7 @@ name = "sp-version" version = "3.0.0" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-runtime", "sp-std", @@ -9106,7 +9260,7 @@ name = "sp-wasm-interface" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "wasmi", ] @@ -9235,8 +9389,8 @@ version = "0.9.0" dependencies = [ "chrono", "console_error_panic_hook", - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "futures-timer 3.0.2", "getrandom 0.2.2", "js-sys", @@ -9278,10 +9432,10 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-client-transports", "jsonrpc-core", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-rpc-api", 
"serde", "sp-storage", @@ -9293,12 +9447,12 @@ name = "substrate-frame-rpc-system" version = "3.0.0" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sc-rpc-api", "sc-transaction-pool", @@ -9320,7 +9474,7 @@ dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.9", + "hyper 0.13.10", "log", "prometheus", "tokio 0.2.25", @@ -9330,11 +9484,12 @@ dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ - "futures 0.1.30", - "futures 0.3.12", + "async-trait", + "futures 0.1.31", + "futures 0.3.13", "hash-db", "hex", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sc-client-db", "sc-consensus", @@ -9361,11 +9516,12 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", + "futures 0.3.13", "log", "memory-db", "pallet-babe", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "sc-block-builder", "sc-executor", @@ -9400,8 +9556,8 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.12", - "parity-scale-codec", + "futures 0.3.13", + "parity-scale-codec 2.0.1", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -9421,8 +9577,8 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.12", - "parity-scale-codec", + "futures 0.3.13", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-transaction-graph", "sp-blockchain", @@ -9435,7 +9591,7 @@ dependencies = [ name = "substrate-test-utils" version = "3.0.0" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "sc-service", "substrate-test-utils-derive", "tokio 0.2.25", @@ -9488,9 +9644,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = 
"syn" -version = "1.0.60" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +checksum = "123a78a3596b24fee53a6464ce52d8ecbf62241e6294c7e7fe12086cd161f512" dependencies = [ "proc-macro2", "quote", @@ -9517,15 +9673,15 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "tap" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9" +checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95" [[package]] name = "tempfile" @@ -9536,7 +9692,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "rand 0.8.3", - "redox_syscall 0.2.4", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] @@ -9550,6 +9706,87 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "test-runner" +version = "0.9.0" +dependencies = [ + "env_logger 0.7.1", + "frame-system", + "futures 0.1.31", + "futures 0.3.13", + "jsonrpc-core", + "log", + "node-cli", + "parity-scale-codec 1.3.7", + "rand 0.7.3", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus-babe", + "sc-consensus-manual-seal", + "sc-executor", + "sc-finality-grandpa", + "sc-informant", + "sc-network", + "sc-rpc", + "sc-rpc-server", + "sc-service", + "sc-transaction-graph", + "sc-transaction-pool", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-core", + "sp-externalities", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-keystore", + "sp-offchain", + "sp-runtime", + 
"sp-runtime-interface", + "sp-session", + "sp-state-machine", + "sp-transaction-pool", + "sp-wasm-interface", + "tokio 0.2.25", +] + +[[package]] +name = "test-runner-example" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "node-cli", + "node-primitives", + "node-runtime", + "pallet-balances", + "pallet-sudo", + "pallet-transaction-payment", + "rand 0.8.3", + "sc-client-api", + "sc-consensus", + "sc-consensus-babe", + "sc-consensus-manual-seal", + "sc-executor", + "sc-finality-grandpa", + "sc-informant", + "sc-network", + "sc-service", + "sp-api", + "sp-consensus-babe", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "test-runner", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -9637,9 +9874,9 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ada8616fad06a2d0c455adc530de4ef57605a8120cc65da9653e0e9623ca74" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", @@ -9667,7 +9904,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "mio", "num_cpus", "tokio-codec", @@ -9698,6 +9935,7 @@ dependencies = [ "libc", "memchr", "mio", + "mio-named-pipes", "mio-uds", "num_cpus", "pin-project-lite 0.1.12", @@ -9715,7 +9953,7 @@ checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" dependencies = [ "bytes 0.4.12", "either", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -9725,7 +9963,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", 
"tokio-io", ] @@ -9735,7 +9973,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "tokio-executor", ] @@ -9746,7 +9984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -9755,7 +9993,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "tokio-io", "tokio-threadpool", ] @@ -9767,7 +10005,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "log", ] @@ -9789,7 +10027,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "mio", "mio-named-pipes", "tokio 0.1.22", @@ -9802,7 +10040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "lazy_static", "log", "mio", @@ -9832,7 +10070,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -9842,7 +10080,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -9852,7 +10090,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "iovec", "mio", "tokio-io", @@ -9868,7 +10106,7 @@ dependencies = [ "crossbeam-deque 0.7.3", "crossbeam-queue", "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "lazy_static", "log", "num_cpus", @@ -9883,7 +10121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "slab", "tokio-executor", ] @@ -9895,7 +10133,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "log", "mio", "tokio-codec", @@ -9910,7 +10148,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "iovec", "libc", "log", @@ -9958,16 +10196,16 @@ checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.5", + "pin-project-lite 0.2.6", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.14" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41768be5b9f3489491825f56f01f25290aa1d3e7cc97e182d4d34360493ba6fa" +checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" dependencies = [ "proc-macro2", "quote", @@ -9985,19 
+10223,19 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project 1.0.5", "tracing", ] [[package]] name = "tracing-log" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" dependencies = [ "lazy_static", "log", @@ -10016,9 +10254,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" +checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -10052,7 +10290,7 @@ dependencies = [ "hash-db", "keccak-hasher", "memory-db", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "trie-db", "trie-root", "trie-standardmap", @@ -10090,6 +10328,49 @@ dependencies = [ "keccak-hasher", ] +[[package]] +name = "trust-dns-proto" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.2", + "ipnet", + "lazy_static", + "log", + "rand 0.8.3", + "smallvec 1.6.1", + "thiserror", + "tinyvec", + "url 2.2.1", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot 0.11.1", + "resolv-conf", + "smallvec 1.6.1", + "thiserror", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -10102,7 +10383,7 @@ version = "0.9.0" dependencies = [ "frame-try-runtime", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "remote-externalities", "sc-cli", "sc-client-api", @@ -10119,9 +10400,9 @@ dependencies = [ [[package]] name = "trybuild" -version = "1.0.39" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c9594b802f041389d2baac680663573dde3103bb4a4926d61d6aba689465978" +checksum = "99471a206425fba51842a9186315f32d91c56eadc21ea4c21f847b59cf778f8b" dependencies = [ "dissimilar", "glob", @@ -10187,9 +10468,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -10271,12 +10552,12 @@ dependencies = [ [[package]] name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", - "idna 0.2.0", + "idna 0.2.2", "matches", "percent-encoding 2.1.0", ] @@ -10352,7 +10633,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "log", "try-lock", ] @@ -10381,9 +10662,9 @@ checksum 
= "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" +checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" dependencies = [ "cfg-if 1.0.0", "serde", @@ -10393,9 +10674,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" +checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" dependencies = [ "bumpalo", "lazy_static", @@ -10408,9 +10689,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -10420,9 +10701,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" +checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10430,9 +10711,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" +checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" dependencies = [ "proc-macro2", "quote", @@ -10443,15 
+10724,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.70" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" +checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" [[package]] name = "wasm-bindgen-test" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" +checksum = "f0d4da138503a4cf86801b94d95781ee3619faa8feca830569cc6b54997b8b5c" dependencies = [ "console_error_panic_hook", "js-sys", @@ -10463,9 +10744,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" +checksum = "c3199c33f06500c731d5544664c24d0c2b742b98debc6b1c6f0c6d6e8fb7c19b" dependencies = [ "proc-macro2", "quote", @@ -10488,7 +10769,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "js-sys", "parking_lot 0.11.1", "pin-utils", @@ -10748,9 +11029,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -10803,6 +11084,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" 
+ [[package]] name = "winapi" version = "0.2.8" @@ -10846,6 +11133,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -10879,7 +11175,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "nohash-hasher", "parking_lot 0.11.1", diff --git a/Cargo.toml b/Cargo.toml index 4d8cfc3e9754d..3e4787770e053 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,6 @@ [workspace] +resolver = "2" + members = [ "bin/node-template/node", "bin/node-template/pallets/template", @@ -6,6 +8,7 @@ members = [ "bin/node/bench", "bin/node/browser-testing", "bin/node/cli", + "bin/node/test-runner-example", "bin/node/executor", "bin/node/primitives", "bin/node/rpc", @@ -76,6 +79,7 @@ members = [ "frame/try-runtime", "frame/elections", "frame/election-provider-multi-phase", + "frame/election-provider-support", "frame/example", "frame/example-offchain-worker", "frame/example-parallel", @@ -104,8 +108,8 @@ members = [ "frame/session/benchmarking", "frame/society", "frame/staking", - "frame/staking/fuzzer", "frame/staking/reward-curve", + "frame/staking/reward-fn", "frame/sudo", "frame/support", "frame/support/procedural", @@ -145,7 +149,6 @@ members = [ "primitives/database", "primitives/debug-derive", "primitives/externalities", - "primitives/election-providers", "primitives/finality-grandpa", "primitives/inherents", "primitives/io", @@ -184,6 +187,7 @@ members = [ "test-utils/runtime", "test-utils/runtime/client", 
"test-utils/runtime/transaction-pool", + "test-utils/test-runner", "test-utils/test-crate", "utils/browser", "utils/build-script-utils", diff --git a/bin/node-template/.editorconfig b/bin/node-template/.editorconfig new file mode 100644 index 0000000000000..5adac74ca24b3 --- /dev/null +++ b/bin/node-template/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style=space +indent_size=2 +tab_width=2 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +insert_final_newline = true + +[*.{rs,toml}] +indent_style=tab +indent_size=tab +tab_width=4 +max_line_length=100 diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 8c8b82a14bb86..cd977fac84493 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -1,97 +1,71 @@ # Substrate Node Template -A new FRAME-based Substrate node, ready for hacking :rocket: +A fresh FRAME-based [Substrate](https://www.substrate.io/) node, ready for hacking :rocket: -## Local Development +## Getting Started -Follow these steps to prepare a local Substrate development environment :hammer_and_wrench: +Follow these steps to get started with the Node Template :hammer_and_wrench: -### Simple Setup +### Rust Setup -Install all the required dependencies with a single command (be patient, this can take up to 30 -minutes). +First, complete the [basic Rust setup instructions](./doc/rust-setup.md). -```bash -curl https://getsubstrate.io -sSf | bash -s -- --fast -``` +### Run -### Manual Setup +Use Rust's native `cargo` command to build and launch the template node: -Find manual setup instructions at the -[Substrate Developer Hub](https://substrate.dev/docs/en/knowledgebase/getting-started/#manual-installation). +```sh +cargo run --release -- --dev --tmp +``` ### Build -Once the development environment is set up, build the node template. 
This command will build the -[Wasm](https://substrate.dev/docs/en/knowledgebase/advanced/executor#wasm-execution) and -[native](https://substrate.dev/docs/en/knowledgebase/advanced/executor#native-execution) code: +The `cargo run` command will perform an initial build. Use the following command to build the node +without launching it: -```bash +```sh cargo build --release ``` -## Run - -### Single Node Development Chain +### Embedded Docs -Purge any existing dev chain state: +Once the project has been built, the following command can be used to explore all parameters and +subcommands: -```bash -./target/release/node-template purge-chain --dev +```sh +./target/release/node-template -h ``` -Start a dev chain: +## Run -```bash -./target/release/node-template --dev -``` +The provided `cargo run` command will launch a temporary node and its state will be discarded after +you terminate the process. After the project has been built, there are other ways to launch the +node. + +### Single-Node Development Chain -Or, start a dev chain with detailed logging: +This command will start the single-node development chain with persistent state: ```bash -RUST_LOG=debug RUST_BACKTRACE=1 ./target/release/node-template -lruntime=debug --dev +./target/release/node-template --dev ``` -### Multi-Node Local Testnet - -To see the multi-node consensus algorithm in action, run a local testnet with two validator nodes, -Alice and Bob, that have been [configured](./node/src/chain_spec.rs) as the initial -authorities of the `local` testnet chain and endowed with testnet units. - -Note: this will require two terminal sessions (one for each node). - -Start Alice's node first. The command below uses the default TCP port (30333) and specifies -`/tmp/alice` as the chain database location. Alice's node ID will be -`12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` (legacy representation: -`QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`); this is determined by the `node-key`. 
+Purge the development chain's state: ```bash -cargo run -- \ - --base-path /tmp/alice \ - --chain=local \ - --alice \ - --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator +./target/release/node-template purge-chain --dev ``` -In another terminal, use the following command to start Bob's node on a different TCP port (30334) -and with a chain database location of `/tmp/bob`. The `--bootnodes` option will connect his node to -Alice's on TCP port 30333: +Start the development chain with detailed logging: ```bash -cargo run -- \ - --base-path /tmp/bob \ - --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp \ - --chain=local \ - --bob \ - --port 30334 \ - --ws-port 9945 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator +RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev ``` -Execute `cargo run -- --help` to learn more about the template node's CLI options. +### Multi-Node Local Testnet + +If you want to see the multi-node consensus algorithm in action, refer to +[our Start a Private Network tutorial](https://substrate.dev/docs/en/tutorials/start-a-private-network/). ## Template Structure @@ -184,24 +158,28 @@ A FRAME pallet is compromised of a number of blockchain primitives: - Config: The `Config` configuration interface is used to define the types and parameters upon which a FRAME pallet depends. -## Generate a Custom Node Template +### Run in Docker -Generate a Substrate node template based on a particular commit by running the following commands: +First, install [Docker](https://docs.docker.com/get-docker/) and +[Docker Compose](https://docs.docker.com/compose/install/). + +Then run the following command to start a single node development chain. 
```bash -# Clone from the main Substrate repo -git clone https://github.com/paritytech/substrate.git -cd substrate +./scripts/docker_run.sh +``` -# Switch to the branch or commit to base the template on -git checkout +This command will firstly compile your code, and then start a local development network. You can +also replace the default command (`cargo build --release && ./target/release/node-template --dev --ws-external`) +by appending your own. A few useful ones are as follow. -# Run the helper script to generate a node template. This script compiles Substrate, so it will take -# a while to complete. It expects a single parameter: the location for the script's output expressed -# as a relative path. -.maintain/node-template-release.sh ../node-template.tar.gz -``` +```bash +# Run Substrate node without re-compiling +./scripts/docker_run.sh ./target/release/node-template --dev --ws-external + +# Purge the local dev chain +./scripts/docker_run.sh ./target/release/node-template purge-chain --dev -Custom node templates are not supported. Please use a recently tagged version of the -[Substrate Developer Node Template](https://github.com/substrate-developer-hub/substrate-node-template) -in order to receive support. 
+# Check whether the code is compilable +./scripts/docker_run.sh cargo check +``` diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml new file mode 100644 index 0000000000000..cfc4437bbae41 --- /dev/null +++ b/bin/node-template/docker-compose.yml @@ -0,0 +1,17 @@ +version: "3.2" + +services: + dev: + container_name: node-template + image: paritytech/ci-linux:974ba3ac-20201006 + working_dir: /var/www/node-template + ports: + - "9944:9944" + environment: + - CARGO_HOME=/var/www/node-template/.cargo + volumes: + - .:/var/www/node-template + - type: bind + source: ./.local + target: /root/.local + command: bash -c "cargo build --release && ./target/release/node-template --dev --ws-external" diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md new file mode 100644 index 0000000000000..34f6e43e7f0dd --- /dev/null +++ b/bin/node-template/docs/rust-setup.md @@ -0,0 +1,81 @@ +--- +title: Installation +--- + +This page will guide you through the steps needed to prepare a computer for development with the +Substrate Node Template. Since Substrate is built with +[the Rust programming language](https://www.rust-lang.org/), the first thing you will need to do is +prepare the computer for Rust development - these steps will vary based on the computer's operating +system. Once Rust is configured, you will use its toolchains to interact with Rust projects; the +commands for Rust's toolchains will be the same for all supported, Unix-based operating systems. + +## Unix-Based Operating Systems + +Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples +in the Substrate [Tutorials](https://substrate.dev/tutorials) and [Recipes](https://substrate.dev/recipes/) +use Unix-style terminals to demonstrate how to interact with Substrate from the command line. 
+ +### macOS + +Open the Terminal application and execute the following commands: + +```bash +# Install Homebrew if necessary https://brew.sh/ +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + +# Make sure Homebrew is up-to-date, install openssl and cmake +brew update +brew install openssl cmake +``` + +### Ubuntu/Debian + +Use a terminal shell to execute the following commands: + +```bash +sudo apt update +# May prompt for location information +sudo apt install -y cmake pkg-config libssl-dev git build-essential clang libclang-dev curl +``` + +### Arch Linux + +Run these commands from a terminal: + +```bash +pacman -Syu --needed --noconfirm cmake gcc openssl-1.0 pkgconf git clang +export OPENSSL_LIB_DIR="/usr/lib/openssl-1.0" +export OPENSSL_INCLUDE_DIR="/usr/include/openssl-1.0" +``` + +### Fedora/RHEL/CentOS + +Use a terminal to run the following commands: + +```bash +# Update +sudo dnf update +# Install packages +sudo dnf install cmake pkgconfig rocksdb rocksdb-devel llvm git libcurl libcurl-devel curl-devel clang +``` + +## Rust Developer Environment + +This project uses [`rustup`](https://rustup.rs/) to help manage the Rust toolchain. First install +and configure `rustup`: + +```bash +# Install +curl https://sh.rustup.rs -sSf | sh +# Configure +source ~/.cargo/env +``` + +Finally, configure the Rust toolchain: + +```bash +rustup default stable +rustup update nightly +rustup update stable +rustup target add wasm32-unknown-unknown --toolchain nightly +``` diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 2d36d3c469083..e53320c940510 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "node-template" -version = "2.0.0" -authors = ["Anonymous"] -description = "A new FRAME-based Substrate node, ready for hacking." 
+version = "3.0.0" +authors = ["Substrate DevHub "] +description = "A fresh FRAME-based Substrate node, ready for hacking." edition = "2018" license = "Unlicense" build = "build.rs" homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -51,7 +51,7 @@ pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/tra frame-benchmarking = { version = "3.1.0", path = "../../../frame/benchmarking" } frame-benchmarking-cli = { version = "3.0.0", path = "../../../utils/frame/benchmarking-cli" } -node-template-runtime = { version = "2.0.0", path = "../runtime" } +node-template-runtime = { version = "3.0.0", path = "../runtime" } [build-dependencies] substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 9f0c6ee182670..df76d20a4a7e1 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -1,11 +1,11 @@ [package] -authors = ['Anonymous'] +authors = ['Substrate DevHub '] edition = '2018' name = 'pallet-template' -version = "2.0.0" +version = "3.0.0" license = "Unlicense" homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" description = "FRAME pallet template for defining custom runtime logic." 
readme = "README.md" @@ -14,40 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } - -[dependencies.frame-support] -default-features = false -version = "3.0.0" -path = "../../../../frame/support" - -[dependencies.frame-system] -default-features = false -version = "3.0.0" -path = "../../../../frame/system" - -[dependencies.frame-benchmarking] -default-features = false -version = "3.1.0" -path = "../../../../frame/benchmarking" -optional = true +frame-support = { default-features = false, version = "3.0.0", path = "../../../../frame/support" } +frame-system = { default-features = false, version = "3.0.0", path = "../../../../frame/system" } +frame-benchmarking = { default-features = false, version = "3.1.0", path = "../../../../frame/benchmarking", optional = true } [dev-dependencies] -serde = { version = "1.0.101" } - -[dev-dependencies.sp-core] -default-features = false -version = "3.0.0" -path = "../../../../primitives/core" - -[dev-dependencies.sp-io] -default-features = false -version = "3.0.0" -path = "../../../../primitives/io" - -[dev-dependencies.sp-runtime] -default-features = false -version = "3.0.0" -path = "../../../../primitives/runtime" +serde = { version = "1.0.119" } +sp-core = { default-features = false, version = "3.0.0", path = "../../../../primitives/core" } +sp-io = { default-features = false, version = "3.0.0", path = "../../../../primitives/io" } +sp-runtime = { default-features = false, version = "3.0.0", path = "../../../../primitives/runtime" } [features] default = ['std'] @@ -57,5 +32,6 @@ std = [ 'frame-system/std', 'frame-benchmarking/std', ] + runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] diff --git a/bin/node-template/pallets/template/src/benchmarking.rs b/bin/node-template/pallets/template/src/benchmarking.rs index 5296ed7261d98..93d7fa395ad6b 100644 --- 
a/bin/node-template/pallets/template/src/benchmarking.rs +++ b/bin/node-template/pallets/template/src/benchmarking.rs @@ -5,7 +5,7 @@ use super::*; use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; #[allow(unused)] -use crate::Module as Template; +use crate::Pallet as Template; benchmarks! { do_something { diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 99a285492c77f..7b986a5186692 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -17,7 +17,7 @@ mod benchmarking; #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; + use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; /// Configure the pallet by specifying the parameters and types on which it depends. @@ -70,7 +70,7 @@ pub mod pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] - pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { + pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. // https://substrate.dev/docs/en/knowledgebase/runtime/origin @@ -82,12 +82,12 @@ pub mod pallet { // Emit an event. Self::deposit_event(Event::SomethingStored(something, who)); // Return a successful DispatchResultWithPostInfo - Ok(().into()) + Ok(()) } /// An example dispatchable that may throw a custom error. 
#[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] - pub fn cause_error(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn cause_error(origin: OriginFor) -> DispatchResult { let _who = ensure_signed(origin)?; // Read a value from storage. @@ -99,7 +99,7 @@ pub mod pallet { let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; // Update the value in storage with the incremented result. >::put(new); - Ok(().into()) + Ok(()) }, } } diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index d33670f2e9cb0..1ebe3bee6090c 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -7,7 +7,7 @@ use sp_runtime::{ use frame_system as system; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( @@ -16,8 +16,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - TemplateModule: pallet_template::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, } ); diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index d4e202d688c87..5bba2a4e970b0 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "node-template-runtime" -version = "2.0.0" -authors = ["Anonymous"] +version = "3.0.0" +authors = ["Substrate DevHub "] edition = "2018" license = "Unlicense" homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" +repository = 
"https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -45,7 +45,7 @@ frame-benchmarking = { version = "3.1.0", default-features = false, path = "../. frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } hex-literal = { version = "0.3.1", optional = true } -template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } +pallet-template = { version = "3.0.0", default-features = false, path = "../pallets/template" } [build-dependencies] substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } @@ -56,14 +56,17 @@ std = [ "codec/std", "frame-executive/std", "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", "pallet-randomness-collective-flip/std", "pallet-sudo/std", + "pallet-template/std", "pallet-timestamp/std", - "pallet-transaction-payment/std", "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", "serde", "sp-api/std", "sp-block-builder/std", @@ -76,18 +79,15 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "frame-system/std", - "frame-system-rpc-runtime-api/std", - "template/std", ] runtime-benchmarks = [ - "sp-runtime/runtime-benchmarks", "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system-benchmarking", - "hex-literal", "frame-system/runtime-benchmarks", + "hex-literal", "pallet-balances/runtime-benchmarks", + "pallet-template/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", - "template/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 0f026db5735cc..1675b3d2a1cdc 100644 --- a/bin/node-template/runtime/src/lib.rs +++ 
b/bin/node-template/runtime/src/lib.rs @@ -40,7 +40,7 @@ pub use frame_support::{ use pallet_transaction_payment::CurrencyAdapter; /// Import the template pallet. -pub use template; +pub use pallet_template; /// An index to a block. pub type BlockNumber = u32; @@ -92,17 +92,24 @@ pub mod opaque { } } +// To learn more about runtime versioning and what each of the following value means: +// https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), authoring_version: 1, - spec_version: 1, + // The version of the runtime specification. A full node will not attempt to use its native + // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + // `spec_version`, and `authoring_version` are the same between Wasm and native. + // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use + // the compatible custom types. + spec_version: 100, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; -/// This determines the average expected block time that we are targetting. +/// This determines the average expected block time that we are targeting. /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. /// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked /// up by `pallet_aura` to implement `fn slot_duration()`. @@ -258,8 +265,8 @@ impl pallet_sudo::Config for Runtime { type Call = Call; } -/// Configure the pallet template in pallets/template. -impl template::Config for Runtime { +/// Configure the pallet-template in pallets/template. 
+impl pallet_template::Config for Runtime { type Event = Event; } @@ -270,16 +277,16 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Aura: pallet_aura::{Module, Config}, - Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Module, Storage}, - Sudo: pallet_sudo::{Module, Call, Config, Storage, Event}, - // Include the custom logic from the template pallet in the runtime. - TemplateModule: template::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Aura: pallet_aura::{Pallet, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + // Include the custom logic from the pallet-template in the runtime. + TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, } ); @@ -313,7 +320,7 @@ pub type Executive = frame_executive::Executive< Block, frame_system::ChainContext, Runtime, - AllModules, + AllPallets, >; impl_runtime_apis! { @@ -378,8 +385,8 @@ impl_runtime_apis! { } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec { @@ -453,7 +460,7 @@ impl_runtime_apis! 
{ ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - use frame_system_benchmarking::Module as SystemBench; + use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ @@ -475,7 +482,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); - add_benchmark!(params, batches, template, TemplateModule); + add_benchmark!(params, batches, pallet_template, TemplateModule); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/bin/node-template/scripts/docker_run.sh b/bin/node-template/scripts/docker_run.sh new file mode 100644 index 0000000000000..0bac44b4cfb3b --- /dev/null +++ b/bin/node-template/scripts/docker_run.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# This script is meant to be run on Unix/Linux based systems +set -e + +echo "*** Start Substrate node template ***" + +cd $(dirname ${BASH_SOURCE[0]})/.. 
+ +docker-compose down --remove-orphans +docker-compose run --rm --service-ports dev $@ diff --git a/bin/node-template/scripts/init.sh b/bin/node-template/scripts/init.sh index 1405a41ef333e..f976f7235d700 100755 --- a/bin/node-template/scripts/init.sh +++ b/bin/node-template/scripts/init.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash - +# This script is meant to be run on Unix/Linux based systems set -e echo "*** Initializing WASM build environment" diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index fe83cc65ba632..292ee2cab6bf7 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,11 +8,11 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.70", features = ["serde-serialize"] } +wasm-bindgen = { version = "=0.2.71", features = ["serde-serialize"] } wasm-bindgen-futures = "0.4.18" wasm-bindgen-test = "0.3.18" futures = "0.3.9" diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index ebba2095e6be3..9449edfbf6e08 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -105,7 +105,7 @@ try-runtime-cli = { version = "0.9.0", optional = true, path = "../../../utils/f wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.18", optional = true } browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.9.0"} -libp2p-wasm-ext = { version = "0.27", features = ["websocket"], optional = true } +libp2p-wasm-ext = { version = "0.28", features = ["websocket"], optional = true } [target.'cfg(target_arch="x86_64")'.dependencies] node-executor = { version = "2.0.0", path = "../executor", features = [ "wasmtime" ] } @@ -116,7 +116,7 @@ sp-trie = { version = 
"3.0.0", default-features = false, path = "../../../primit [dev-dependencies] sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.9.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } sc-consensus-epochs = { version = "0.9.0", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } futures = "0.3.9" diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index ae1418981f167..c30710d236acc 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -295,10 +295,9 @@ pub fn testnet_genesis( phantom: Default::default(), }, pallet_contracts: ContractsConfig { - current_schedule: pallet_contracts::Schedule { - enable_println, // this should only be enabled on development chains - ..Default::default() - }, + // println should only be enabled on development chains + current_schedule: pallet_contracts::Schedule::default() + .enable_println(enable_println), }, pallet_sudo: SudoConfig { key: root_key, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 1351782315be7..ce0ffb2cecc0a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -534,7 +534,7 @@ pub fn new_light( #[cfg(test)] mod tests { - use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto}; + use std::{sync::Arc, borrow::Cow, convert::TryInto}; use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; use sp_consensus::{ @@ -638,27 +638,34 @@ mod tests { None, ); - let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( - descendent_query(&*service.client()), - &parent_hash, - parent_number, - slot.into(), - 
).unwrap().unwrap(); - let mut digest = Digest::::default(); // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); - if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( + let (babe_pre_digest, epoch_descriptor) = loop { + inherent_data.replace_data( + sp_timestamp::INHERENT_IDENTIFIER, + &(slot * SLOT_DURATION), + ); + + let epoch_descriptor = babe_link.epoch_changes().shared_data().epoch_descriptor_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot.into(), + ).unwrap().unwrap(); + + let epoch = babe_link.epoch_changes().shared_data().epoch_data( + &epoch_descriptor, + |slot| sc_consensus_babe::Epoch::genesis(&babe_link.config(), slot), + ).unwrap(); + + if let Some(babe_pre_digest) = sc_consensus_babe::authorship::claim_slot( slot.into(), - &parent_header, - &*service.client(), - keystore.clone(), - &babe_link, - ) { - break babe_pre_digest; + &epoch, + &keystore, + ).map(|(digest, _)| digest) { + break (babe_pre_digest, epoch_descriptor) } slot += 1; @@ -696,11 +703,11 @@ mod tests { params.body = Some(new_body); params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import.import_block(params, Default::default()) + futures::executor::block_on(block_import.import_block(params, Default::default())) .expect("error importing test block"); }, |service, _| { diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index fb7fc9191141c..54a44d59c2591 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -44,6 +44,7 @@ sp-runtime = { version = "3.0.0", path = 
"../../../primitives/runtime" } sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } wat = "1.0" +futures = "0.3.9" [features] wasmtime = [ diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 279b6a776031a..fe3ae5f14cc37 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -600,13 +600,13 @@ fn deploying_wasm_contract_should_work() { let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); let transfer_ch = ::Hashing::hash(&transfer_code); - let addr = pallet_contracts::Module::::contract_address( + let addr = pallet_contracts::Pallet::::contract_address( &charlie(), &transfer_ch, &[], ); - let subsistence = pallet_contracts::Module::::subsistence_threshold(); + let subsistence = pallet_contracts::Pallet::::subsistence_threshold(); let time = 42 * 1000; let b = construct_block( @@ -656,13 +656,10 @@ fn deploying_wasm_contract_should_work() { ).0.unwrap(); t.execute_with(|| { - // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. - assert_eq!( - &pallet_contracts::ContractInfoOf::::get(addr) - .and_then(|c| c.get_alive()) - .unwrap() - .code_hash, - &transfer_ch + // Verify that the contract does exist by querying some of its storage items + // It does not matter that the storage item itself does not exist. 
+ assert!( + &pallet_contracts::Pallet::::get_storage(addr, Default::default()).is_ok() ); }); } @@ -844,5 +841,5 @@ fn should_import_block_with_test_client() { let block_data = block1.0; let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 43ecca7e74456..f0cad60f2614d 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -36,6 +36,7 @@ sp-keyring = { version = "3.0.0", optional = true, path = "../../../primitives/k sp-session = { version = "3.0.0", default-features = false, path = "../../../primitives/session" } sp-transaction-pool = { version = "3.0.0", default-features = false, path = "../../../primitives/transaction-pool" } sp-version = { version = "3.0.0", default-features = false, path = "../../../primitives/version" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../../primitives/npos-elections" } # frame dependencies frame-executive = { version = "3.0.0", default-features = false, path = "../../../frame/executive" } @@ -159,6 +160,7 @@ std = [ "pallet-vesting/std", "log/std", "frame-try-runtime/std", + "sp-npos-elections/std", ] runtime-benchmarks = [ "frame-benchmarking", diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index c6a56e5ac0dab..416266119cb09 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -84,7 +84,7 @@ mod multiplier_tests { let t1 = v * (s/m - ss/m); let t2 = v.powi(2) * (s/m - ss/m).powi(2) / 2.0; let next_float = previous_float * (1.0 + t1 + t2); - Multiplier::from_fraction(next_float) + Multiplier::from_float(next_float) } fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 
bb372f31c73b9..f790cf41a401b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -114,7 +114,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 265, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 2, }; @@ -379,7 +379,7 @@ impl pallet_balances::Config for Runtime { type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; + type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; } @@ -468,17 +468,11 @@ parameter_types! { pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 256; - pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4; - pub const MaxIterations: u32 = 10; - // 0.05%. The higher the value, the more strict solution acceptance becomes. 
- pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = RuntimeBlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") - .saturating_sub(BlockExecutionWeight::get()); } impl pallet_staking::Config for Runtime { + const MAX_NOMINATIONS: u32 = + ::LIMIT as u32; type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; @@ -496,17 +490,9 @@ impl pallet_staking::Config for Runtime { pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> >; type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type MaxIterations = MaxIterations; - type MinSolutionScoreBump = MinSolutionScoreBump; - type UnsignedPriority = StakingUnsignedPriority; - // The unsigned solution weight targeted by the OCW. We set it to the maximum possible value of - // a single extrinsic. - type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; type ElectionProvider = ElectionProviderMultiPhase; type WeightInfo = pallet_staking::weights::SubstrateWeight; } @@ -518,9 +504,9 @@ parameter_types! { // fallback: no need to do on-chain phragmen initially. 
pub const Fallback: pallet_election_provider_multi_phase::FallbackStrategy = - pallet_election_provider_multi_phase::FallbackStrategy::Nothing; + pallet_election_provider_multi_phase::FallbackStrategy::OnChain; - pub SolutionImprovementThreshold: Perbill = Perbill::from_rational_approximation(1u32, 10_000); + pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000); // miner configs pub const MultiPhaseUnsignedPriority: TransactionPriority = StakingUnsignedPriority::get() - 1u64; @@ -531,18 +517,27 @@ parameter_types! { .saturating_sub(BlockExecutionWeight::get()); } +sp_npos_elections::generate_solution_type!( + #[compact] + pub struct NposCompactSolution16::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + >(16) +); + impl pallet_election_provider_multi_phase::Config for Runtime { type Event = Event; type Currency = Balances; type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; - type SolutionImprovementThreshold = MinSolutionScoreBump; + type SolutionImprovementThreshold = SolutionImprovementThreshold; type MinerMaxIterations = MinerMaxIterations; type MinerMaxWeight = MinerMaxWeight; type MinerTxPriority = MultiPhaseUnsignedPriority; type DataProvider = Staking; type OnChainAccuracy = Perbill; - type CompactSolution = pallet_staking::CompactAssignments; + type CompactSolution = NposCompactSolution16; type Fallback = Fallback; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; type BenchmarkingConfig = (); @@ -760,14 +755,14 @@ impl pallet_tips::Config for Runtime { } parameter_types! 
{ - pub const TombstoneDeposit: Balance = deposit( + pub TombstoneDeposit: Balance = deposit( 1, - sp_std::mem::size_of::>() as u32 + >::contract_info_size(), ); - pub const DepositPerContract: Balance = TombstoneDeposit::get(); + pub DepositPerContract: Balance = TombstoneDeposit::get(); pub const DepositPerStorageByte: Balance = deposit(0, 1); pub const DepositPerStorageItem: Balance = deposit(1, 0); - pub RentFraction: Perbill = Perbill::from_rational_approximation(1u32, 30 * DAYS); + pub RentFraction: Perbill = Perbill::from_rational(1u32, 30 * DAYS); pub const SurchargeReward: Balance = 150 * MILLICENTS; pub const SignedClaimHandicap: u32 = 2; pub const MaxDepth: u32 = 32; @@ -969,6 +964,7 @@ parameter_types! { pub const PeriodSpend: Balance = 500 * DOLLARS; pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; pub const ChallengePeriod: BlockNumber = 7 * DAYS; + pub const MaxCandidateIntake: u32 = 10; pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); } @@ -986,6 +982,7 @@ impl pallet_society::Config for Runtime { type MaxLockDuration = MaxLockDuration; type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type SuspensionJudgementOrigin = pallet_society::EnsureFounder; + type MaxCandidateIntake = MaxCandidateIntake; type ChallengePeriod = ChallengePeriod; } @@ -1005,7 +1002,7 @@ impl pallet_mmr::Config for Runtime { const INDEXING_PREFIX: &'static [u8] = b"mmr"; type Hashing = ::Hashing; type Hash = ::Hash; - type LeafData = frame_system::Module; + type LeafData = frame_system::Pallet; type OnNewRoot = (); type WeightInfo = (); } @@ -1048,6 +1045,8 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = StringLimit; + type Freezer = (); + type Extra = (); type WeightInfo = pallet_assets::weights::SubstrateWeight; } @@ -1065,6 +1064,7 @@ parameter_types! 
{ impl pallet_gilt::Config for Runtime { type Event = Event; type Currency = Balances; + type CurrencyBalance = Balance; type AdminOrigin = frame_system::EnsureRoot; type Deficit = (); type Surplus = (); @@ -1085,44 +1085,44 @@ construct_runtime!( NodeBlock = node_primitives::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - Utility: pallet_utility::{Module, Call, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, ValidateUnsigned}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, - Indices: pallet_indices::{Module, Call, Storage, Config, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Module, Storage}, - ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Module, Call, Storage, Event, ValidateUnsigned}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, - Council: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, - TechnicalCommittee: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, - Elections: pallet_elections_phragmen::{Module, Call, Storage, Event, Config}, - TechnicalMembership: pallet_membership::::{Module, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, - Contracts: pallet_contracts::{Module, Call, Config, Storage, Event}, - Sudo: pallet_sudo::{Module, Call, Config, Storage, Event}, - ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, - Offences: 
pallet_offences::{Module, Call, Storage, Event}, - Historical: pallet_session_historical::{Module}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, - Identity: pallet_identity::{Module, Call, Storage, Event}, - Society: pallet_society::{Module, Call, Storage, Event, Config}, - Recovery: pallet_recovery::{Module, Call, Storage, Event}, - Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, - Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, - Proxy: pallet_proxy::{Module, Call, Storage, Event}, - Multisig: pallet_multisig::{Module, Call, Storage, Event}, - Bounties: pallet_bounties::{Module, Call, Storage, Event}, - Tips: pallet_tips::{Module, Call, Storage, Event}, - Assets: pallet_assets::{Module, Call, Storage, Event}, - Mmr: pallet_mmr::{Module, Storage}, - Lottery: pallet_lottery::{Module, Call, Storage, Event}, - Gilt: pallet_gilt::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Utility: pallet_utility::{Pallet, Call, Event}, + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Pallet, Call, Storage, Event, ValidateUnsigned}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, + Council: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, + TechnicalCommittee: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, + Elections: 
pallet_elections_phragmen::{Pallet, Call, Storage, Event, Config}, + TechnicalMembership: pallet_membership::::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Contracts: pallet_contracts::{Pallet, Call, Config, Storage, Event}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Call, Config}, + Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + Society: pallet_society::{Pallet, Call, Storage, Event, Config}, + Recovery: pallet_recovery::{Pallet, Call, Storage, Event}, + Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, + Proxy: pallet_proxy::{Pallet, Call, Storage, Event}, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, + Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, + Tips: pallet_tips::{Pallet, Call, Storage, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Event}, + Mmr: pallet_mmr::{Pallet, Storage}, + Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, + Gilt: pallet_gilt::{Pallet, Call, Storage, Event, Config}, } ); @@ -1162,7 +1162,7 @@ pub type Executive = frame_executive::Executive< Block, frame_system::ChainContext, Runtime, - AllModules, + AllPallets, (), >; @@ -1434,9 +1434,9 @@ impl_runtime_apis! { // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency // issues. To get around that, we separated the Session benchmarks into its own crate, // which is why we need these two lines below. 
- use pallet_session_benchmarking::Module as SessionBench; - use pallet_offences_benchmarking::Module as OffencesBench; - use frame_system_benchmarking::Module as SystemBench; + use pallet_session_benchmarking::Pallet as SessionBench; + use pallet_offences_benchmarking::Pallet as OffencesBench; + use frame_system_benchmarking::Pallet as SystemBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml new file mode 100644 index 0000000000000..f94575e8e621b --- /dev/null +++ b/bin/node/test-runner-example/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "test-runner-example" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false + +[dependencies] +test-runner = { path = "../../../test-utils/test-runner", version = "0.9.0" } + +frame-system = { version = "3.0.0", path = "../../../frame/system" } +frame-support = { path = "../../../frame/support", version = "3.0.0" } +frame-benchmarking = { path = "../../../frame/benchmarking", version = "3.0.0" } +pallet-balances = { path = "../../../frame/balances", version = "3.0.0" } +pallet-sudo = { path = "../../../frame/sudo", version = "3.0.0" } +pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } + +node-runtime = { path = "../runtime", version = "2.0.1" } +node-primitives = { version = "2.0.0", path = "../primitives" } +node-cli = { path = "../cli", version = "2.0.0" } + +grandpa = { version = "0.9.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } +sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } +sc-consensus-manual-seal = { version = "0.9.0", path = "../../../client/consensus/manual-seal" } +sc-service = { version = "0.9.0", default-features = 
false, path = "../../../client/service" } +sc-executor = { version = "0.9.0", path = "../../../client/executor" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sc-network = { version = "0.9.0", path = "../../../client/network" } +sc-informant = { version = "0.9.0", path = "../../../client/informant" } +sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } + +sp-runtime = { path = "../../../primitives/runtime", version = "3.0.0" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } + +rand = "0.8.3" +log = "0.4.14" diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs new file mode 100644 index 0000000000000..22cfffa7f23a7 --- /dev/null +++ b/bin/node/test-runner-example/src/lib.rs @@ -0,0 +1,202 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Basic example of end to end runtime tests. 
+ +use test_runner::{Node, ChainInfo, SignatureVerificationOverride}; +use grandpa::GrandpaBlockImport; +use sc_service::{TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts}; +use std::sync::Arc; +use sp_inherents::InherentDataProviders; +use sc_consensus_babe::BabeBlockImport; +use sp_keystore::SyncCryptoStorePtr; +use sp_keyring::sr25519::Keyring::Alice; +use sp_consensus_babe::AuthorityId; +use sc_consensus_manual_seal::{ConsensusDataProvider, consensus::babe::BabeConsensusDataProvider}; +use sp_runtime::{traits::IdentifyAccount, MultiSigner, generic::Era}; + +type BlockImport = BabeBlockImport>; + +sc_executor::native_executor_instance!( + pub Executor, + node_runtime::api::dispatch, + node_runtime::native_version, + ( + frame_benchmarking::benchmarking::HostFunctions, + SignatureVerificationOverride, + ) +); + +/// ChainInfo implementation. +struct NodeTemplateChainInfo; + +impl ChainInfo for NodeTemplateChainInfo { + type Block = node_primitives::Block; + type Executor = Executor; + type Runtime = node_runtime::Runtime; + type RuntimeApi = node_runtime::RuntimeApi; + type SelectChain = sc_consensus::LongestChain, Self::Block>; + type BlockImport = BlockImport< + Self::Block, + TFullBackend, + TFullClient, + Self::SelectChain, + >; + type SignedExtras = node_runtime::SignedExtra; + + fn signed_extras(from: ::AccountId) -> Self::SignedExtras { + ( + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(Era::Immortal), + frame_system::CheckNonce::::from(frame_system::Pallet::::account_nonce(from)), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + ) + } + + fn create_client_parts( + config: &Configuration, + ) -> Result< + ( + Arc>, + Arc>, + SyncCryptoStorePtr, + TaskManager, + InherentDataProviders, + Option< + Box< + dyn ConsensusDataProvider< + Self::Block, + Transaction = 
sp_api::TransactionFor< + TFullClient, + Self::Block, + >, + >, + >, + >, + Self::SelectChain, + Self::BlockImport, + ), + sc_service::Error, + > { + let (client, backend, keystore, task_manager) = + new_full_parts::(config, None)?; + let client = Arc::new(client); + + let inherent_providers = InherentDataProviders::new(); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let (grandpa_block_import, ..) = + grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + None + )?; + + let (block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let consensus_data_provider = BabeConsensusDataProvider::new( + client.clone(), + keystore.sync_keystore(), + &inherent_providers, + babe_link.epoch_changes().clone(), + vec![(AuthorityId::from(Alice.public()), 1000)], + ) + .expect("failed to create ConsensusDataProvider"); + + Ok(( + client, + backend, + keystore.sync_keystore(), + task_manager, + inherent_providers, + Some(Box::new(consensus_data_provider)), + select_chain, + block_import, + )) + } + + fn dispatch_with_root(call: ::Call, node: &mut Node) { + let alice = MultiSigner::from(Alice.public()).into_account(); + let call = pallet_sudo::Call::sudo(Box::new(call)); + node.submit_extrinsic(call, alice); + node.seal_blocks(1); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use test_runner::NodeConfig; + use log::LevelFilter; + use sc_client_api::execution_extensions::ExecutionStrategies; + use node_cli::chain_spec::development_config; + + #[test] + fn test_runner() { + let config = NodeConfig { + execution_strategies: ExecutionStrategies { + syncing: sc_client_api::ExecutionStrategy::AlwaysWasm, + importing: sc_client_api::ExecutionStrategy::AlwaysWasm, + block_construction: sc_client_api::ExecutionStrategy::AlwaysWasm, + offchain_worker: sc_client_api::ExecutionStrategy::AlwaysWasm, + other: 
sc_client_api::ExecutionStrategy::AlwaysWasm, + }, + chain_spec: Box::new(development_config()), + log_targets: vec![ + ("yamux", LevelFilter::Off), + ("multistream_select", LevelFilter::Off), + ("libp2p", LevelFilter::Off), + ("jsonrpc_client_transports", LevelFilter::Off), + ("sc_network", LevelFilter::Off), + ("tokio_reactor", LevelFilter::Off), + ("parity-db", LevelFilter::Off), + ("sub-libp2p", LevelFilter::Off), + ("sync", LevelFilter::Off), + ("peerset", LevelFilter::Off), + ("ws", LevelFilter::Off), + ("sc_network", LevelFilter::Off), + ("sc_service", LevelFilter::Off), + ("sc_basic_authorship", LevelFilter::Off), + ("telemetry-logger", LevelFilter::Off), + ("sc_peerset", LevelFilter::Off), + ("rpc", LevelFilter::Off), + ("runtime", LevelFilter::Trace), + ("babe", LevelFilter::Debug) + ], + }; + let mut node = Node::::new(config).unwrap(); + // seals blocks + node.seal_blocks(1); + // submit extrinsics + let alice = MultiSigner::from(Alice.public()).into_account(); + node.submit_extrinsic(frame_system::Call::remark((b"hello world").to_vec()), alice); + + // look ma, I can read state. + let _events = node.with_state(|| frame_system::Pallet::::events()); + // get access to the underlying client. 
+ let _client = node.client(); + } +} diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index cc6d7587dd517..edb99c617771a 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -691,7 +691,7 @@ impl BenchContext { assert_eq!(self.client.chain_info().best_number, 0); assert_eq!( - self.client.import_block(import_params, Default::default()) + futures::executor::block_on(self.client.import_block(import_params, Default::default())) .expect("Failed to import block"), ImportResult::Imported( ImportedAux { diff --git a/bin/utils/subkey/README.adoc b/bin/utils/subkey/README.adoc index 5ce0d2d324470..b82213777e93a 100644 --- a/bin/utils/subkey/README.adoc +++ b/bin/utils/subkey/README.adoc @@ -12,7 +12,7 @@ Subkey is a commandline utility included with Substrate that generates or restor subkey generate ``` -Will output a mnemonic phrase and give you the seed, public key, and address of a new account. DO NOT SHARE your mnemonic or seed with ANYONE it will give them access to your funds. If someone is making a transfer to you they will only need your **Address**. +Will output a secret phrase("mnemonic phrase") and give you the secret seed("Private Key"), public key("Account ID") and SS58 address("Public Address") of a new account. DO NOT SHARE your mnemonic phrase or secret seed with ANYONE it will give them access to your funds. If someone is making a transfer to you they will only need your **Public Address**. 
=== Inspecting a key @@ -80,4 +80,4 @@ Public Key URI `F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29` is account: Public key (hex): 0x6d6f646c70792f74727372790000000000000000000000000000000000000000 Account ID: 0x6d6f646c70792f74727372790000000000000000000000000000000000000000 SS58 Address: F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29 -``` \ No newline at end of file +``` diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index e41b250269a11..14841d8d3e96f 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -22,11 +22,11 @@ use std::sync::Arc; use std::collections::{HashMap, HashSet}; use sp_core::ChangesTrieConfigurationRange; use sp_core::offchain::OffchainStorage; -use sp_runtime::{generic::BlockId, Justification, Storage}; +use sp_runtime::{generic::BlockId, Justification, Justifications, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, OffchainChangesCollection, + StorageCollection, ChildStorageCollection, OffchainChangesCollection, IndexOperation, }; use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ @@ -148,7 +148,7 @@ pub trait BlockImportOperation { &mut self, header: Block::Header, body: Option>, - justification: Option, + justifications: Option, state: NewBlockState, ) -> sp_blockchain::Result<()>; @@ -197,9 +197,13 @@ pub trait BlockImportOperation { id: BlockId, justification: Option, ) -> sp_blockchain::Result<()>; + /// Mark a block as new head. If both block import and set head are specified, set head /// overrides block import's best block rule. fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; + + /// Add a transaction index operation. + fn update_transaction_index(&mut self, index: Vec) -> sp_blockchain::Result<()>; } /// Interface for performing operations on the backend. 
@@ -230,7 +234,6 @@ pub trait Finalizer> { notify: bool, ) -> sp_blockchain::Result<()>; - /// Finalize a block. /// /// This will implicitly finalize all blocks up to it and @@ -250,7 +253,6 @@ pub trait Finalizer> { justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; - } /// Provides access to an auxiliary database. @@ -432,6 +434,15 @@ pub trait Backend: AuxStore + Send + Sync { justification: Option, ) -> sp_blockchain::Result<()>; + /// Append justification to the block with the given Id. + /// + /// This should only be called for blocks that are already finalized. + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()>; + /// Returns reference to blockchain backend. fn blockchain(&self) -> &Self::Blockchain; diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 990a7908b62bb..4a0940b1f4bd3 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -23,7 +23,7 @@ use sp_core::storage::StorageKey; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, generic::{BlockId, SignedBlock}, - Justification, + Justifications, }; use sp_consensus::BlockOrigin; @@ -90,21 +90,24 @@ pub trait BlockBackend { /// Get block status. fn block_status(&self, id: &BlockId) -> sp_blockchain::Result; - /// Get block justification set by id. - fn justification(&self, id: &BlockId) -> sp_blockchain::Result>; + /// Get block justifications for the block with the given id. + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result>; /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; - /// Get single extrinsic by hash. - fn extrinsic( + /// Get single indexed transaction by content hash. + /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. 
+ fn indexed_transaction( &self, hash: &Block::Hash, - ) -> sp_blockchain::Result::Extrinsic>>; + ) -> sp_blockchain::Result>>; - /// Check if extrinsic exists. - fn have_extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result { - Ok(self.extrinsic(hash)?.is_some()) + /// Check if transaction index exists. + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + Ok(self.indexed_transaction(hash)?.is_some()) } } diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index b7060cf1d9b1b..930ae39c4b523 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -27,10 +27,10 @@ use sp_core::{ }; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; -use sp_runtime::{Justification, Storage}; +use sp_runtime::{Justification, Justifications, Storage}; use sp_state_machine::{ ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, - ChildStorageCollection, + ChildStorageCollection, IndexOperation, }; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; @@ -51,12 +51,12 @@ struct PendingBlock { #[derive(PartialEq, Eq, Clone)] enum StoredBlock { - Header(B::Header, Option), - Full(B, Option), + Header(B::Header, Option), + Full(B, Option), } impl StoredBlock { - fn new(header: B::Header, body: Option>, just: Option) -> Self { + fn new(header: B::Header, body: Option>, just: Option) -> Self { match body { Some(body) => StoredBlock::Full(B::new(header, body), just), None => StoredBlock::Header(header, just), @@ -70,7 +70,7 @@ impl StoredBlock { } } - fn justification(&self) -> Option<&Justification> { + fn justifications(&self) -> Option<&Justifications> { match *self { StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() } @@ -83,7 +83,7 @@ impl StoredBlock { } } - fn into_inner(self) -> (B::Header, Option>, Option) { + fn into_inner(self) -> (B::Header, Option>, Option) { match self { 
StoredBlock::Header(header, just) => (header, None, just), StoredBlock::Full(block, just) => { @@ -164,7 +164,7 @@ impl Blockchain { &self, hash: Block::Hash, header: ::Header, - justification: Option, + justifications: Option, body: Option::Extrinsic>>, new_state: NewBlockState, ) -> sp_blockchain::Result<()> { @@ -176,7 +176,7 @@ impl Blockchain { { let mut storage = self.storage.write(); storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone()); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification)); + storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { storage.finalized_hash = hash; @@ -285,16 +285,44 @@ impl Blockchain { let block = storage.blocks.get_mut(&hash) .expect("hash was fetched from a block in the db; qed"); - let block_justification = match block { + let block_justifications = match block { StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j }; - *block_justification = justification; + *block_justifications = justification.map(Justifications::from); } Ok(()) } + fn append_justification(&self, id: BlockId, justification: Justification) + -> sp_blockchain::Result<()> + { + let hash = self.expect_block_hash_from_id(&id)?; + let mut storage = self.storage.write(); + + let block = storage + .blocks + .get_mut(&hash) + .expect("hash was fetched from a block in the db; qed"); + + let block_justifications = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j + }; + + if let Some(stored_justifications) = block_justifications { + if !stored_justifications.append(justification) { + return Err(sp_blockchain::Error::BadJustification( + "Duplicate consensus engine ID".into() + )); + } + } else { + *block_justifications = Some(Justifications::from(justification)); + }; + + Ok(()) + } + fn write_aux(&self, ops: Vec<(Vec, Option>)>) { let mut storage = self.storage.write(); for (k, 
v) in ops { @@ -365,9 +393,9 @@ impl blockchain::Backend for Blockchain { })) } - fn justification(&self, id: BlockId) -> sp_blockchain::Result> { + fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justification().map(|x| x.clone())) + b.justifications().map(|x| x.clone())) )) } @@ -387,10 +415,10 @@ impl blockchain::Backend for Blockchain { unimplemented!() } - fn extrinsic( + fn indexed_transaction( &self, _hash: &Block::Hash, - ) -> sp_blockchain::Result::Extrinsic>> { + ) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } } @@ -508,12 +536,12 @@ impl backend::BlockImportOperation for BlockImportOperatio &mut self, header: ::Header, body: Option::Extrinsic>>, - justification: Option, + justifications: Option, state: NewBlockState, ) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); self.pending_block = Some(PendingBlock { - block: StoredBlock::new(header, body, justification), + block: StoredBlock::new(header, body, justifications), state, }); Ok(()) @@ -585,6 +613,10 @@ impl backend::BlockImportOperation for BlockImportOperatio self.set_head = Some(block); Ok(()) } + + fn update_transaction_index(&mut self, _index: Vec) -> sp_blockchain::Result<()> { + Ok(()) + } } /// In-memory backend. Keeps all states and blocks in memory. 
@@ -696,6 +728,14 @@ impl backend::Backend for Backend where Block::Hash self.blockchain.finalize_header(block, justification) } + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()> { + self.blockchain.append_justification(block, justification) + } + fn blockchain(&self) -> &Self::Blockchain { &self.blockchain } @@ -766,3 +806,64 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use crate::{NewBlockState, in_mem::Blockchain}; + use sp_api::{BlockId, HeaderT}; + use sp_runtime::{ConsensusEngineId, Justifications}; + use sp_blockchain::Backend; + use substrate_test_runtime::{Block, Header, H256}; + + pub const ID1: ConsensusEngineId = *b"TST1"; + pub const ID2: ConsensusEngineId = *b"TST2"; + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) + } + + fn test_blockchain() -> Blockchain { + let blockchain = Blockchain::::new(); + let just0 = Some(Justifications::from((ID1, vec![0]))); + let just1 = Some(Justifications::from((ID1, vec![1]))); + let just2 = None; + let just3 = Some(Justifications::from((ID1, vec![3]))); + blockchain.insert(header(0).hash(), header(0), just0, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(1).hash(), header(1), just1, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(2).hash(), header(2), just2, None, NewBlockState::Best).unwrap(); + blockchain.insert(header(3).hash(), header(3), just3, None, NewBlockState::Final).unwrap(); + blockchain + } + + #[test] + fn append_and_retrieve_justifications() { + let blockchain = test_blockchain(); + let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); + + blockchain.append_justification(block, 
(ID2, vec![4])).unwrap(); + let justifications = { + let mut just = Justifications::from((ID1, vec![3])); + just.append((ID2, vec![4])); + just + }; + assert_eq!(blockchain.justifications(block).unwrap(), Some(justifications)); + } + + #[test] + fn store_duplicate_justifications_is_forbidden() { + let blockchain = test_blockchain(); + let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); + + blockchain.append_justification(block, (ID2, vec![0])).unwrap(); + assert!(matches!( + blockchain.append_justification(block, (ID2, vec![1])), + Err(sp_blockchain::Error::BadJustification(_)), + )); + } +} diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 1971012c6aabc..47cac8b186f4a 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -25,7 +25,7 @@ use sp_runtime::traits::AtLeast32Bit; use codec::{Encode, Decode}; use sp_blockchain::{Error, Result}; -type DbHash = [u8; 32]; +type DbHash = sp_core::H256; #[derive(Debug, Clone, PartialEq, Eq)] struct LeafSetItem { @@ -55,6 +55,11 @@ impl FinalizationDisplaced { // one transaction, then there will be no overlap in the keys. self.leaves.append(&mut other.leaves); } + + /// Iterate over all displaced leaves. + pub fn leaves(&self) -> impl IntoIterator { + self.leaves.values().flatten() + } } /// list of leaf hashes ordered by number (descending). 
diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 4de6b5479066d..4a92186c444b7 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -23,7 +23,7 @@ derive_more = "0.99.2" either = "1.5.3" futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.35.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.36.0", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} prost = "0.7" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 93ee4fc1445de..910abfad5ae1e 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -420,6 +420,7 @@ mod tests { use sp_blockchain::HeaderBackend; use sp_runtime::traits::NumberFor; use sc_client_api::Backend; + use futures::executor::block_on; const SOURCE: TransactionSource = TransactionSource::External; @@ -454,11 +455,11 @@ mod tests { client.clone(), ); - futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) ).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -492,7 +493,7 @@ mod tests { // when let deadline = time::Duration::from_secs(3); - let block = futures::executor::block_on( + let block = block_on( proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); @@ -538,7 +539,7 @@ mod tests { ); let deadline = time::Duration::from_secs(1); - futures::executor::block_on( + block_on( proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); } @@ -559,11 +560,11 @@ mod tests { let genesis_hash = client.info().best_hash; let block_id 
= BlockId::Hash(genesis_hash); - futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), ).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -585,7 +586,7 @@ mod tests { ); let deadline = time::Duration::from_secs(9); - let proposal = futures::executor::block_on( + let proposal = block_on( proposer.propose(Default::default(), Default::default(), deadline), ).unwrap(); @@ -625,7 +626,7 @@ mod tests { client.clone(), ); - futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![ extrinsic(0), extrinsic(1), @@ -667,7 +668,7 @@ mod tests { // when let deadline = time::Duration::from_secs(9); - let block = futures::executor::block_on( + let block = block_on( proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); @@ -679,7 +680,7 @@ mod tests { block }; - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -689,9 +690,9 @@ mod tests { // let's create one block and import it let block = propose_block(&client, 0, 2, 7); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(1)) .expect("header get error") @@ -701,6 +702,6 @@ mod tests { // now let's make sure that we can still make some progress let block = propose_block(&client, 1, 2, 5); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); } } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 4617c2d790ada..55748ffb3d903 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ regex = "1.4.2" tokio = { version = "0.2.21", features = [ "signal", "rt-core", 
"rt-threaded", "blocking" ] } futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.35.1" +libp2p = "0.36.0" parity-scale-codec = "2.0.0" hex = "0.4.2" rand = "0.7.3" diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 1465119c81d08..b2301fa9c5de5 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -26,7 +26,6 @@ futures = "0.3.9" futures-timer = "3.0.1" sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } log = "0.4.8" -parking_lot = "0.11.1" sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-io = { version = "3.0.0", path = "../../../primitives/io" } @@ -38,6 +37,7 @@ sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } sc-telemetry = { version = "3.0.0", path = "../../telemetry" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +async-trait = "0.1.42" # We enable it only for web-wasm check # See https://docs.rs/getrandom/0.2.1/getrandom/#webassembly-support getrandom = { version = "0.2", features = ["js"], optional = true } @@ -52,3 +52,4 @@ sc-network-test = { version = "0.8.0", path = "../../network/test" } sc-service = { version = "0.9.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tempfile = "3.1.0" +parking_lot = "0.11.1" diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index d3ed2bea3e115..736c89aff6b09 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -30,7 +30,7 @@ use log::{debug, info, trace}; use prometheus_endpoint::Registry; use codec::{Encode, Decode, Codec}; use sp_consensus::{ - 
BlockImport, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, + BlockImport, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, SlotData, BlockOrigin, Error as ConsensusError, BlockCheckParams, ImportResult, import_queue::{ Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, @@ -39,7 +39,7 @@ use sp_consensus::{ use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, ProvideCache, HeaderBackend}; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, Justification}; +use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, Justifications}; use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; @@ -220,6 +220,7 @@ impl AuraVerifier where } } +#[async_trait::async_trait] impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + @@ -234,11 +235,11 @@ impl Verifier for AuraVerifier where P::Signature: Encode + Decode, CAW: CanAuthorWith + Send + Sync + 'static, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, mut body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { let mut inherent_data = self.inherent_data_providers @@ -284,7 +285,7 @@ impl Verifier for AuraVerifier where block.clone(), BlockId::Hash(parent_hash), inherent_data, - timestamp_now, + *timestamp_now, ).map_err(|e| e.to_string())?; } @@ -317,7 +318,7 @@ impl Verifier for AuraVerifier where let mut import_block = BlockImportParams::new(origin, pre_header); import_block.post_digests.push(seal); import_block.body = body; - import_block.justification = justification; + import_block.justifications = justifications; import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); import_block.post_hash = Some(hash); @@ -405,6 +406,7 @@ impl, P> AuraBlockImport } } 
+#[async_trait::async_trait] impl BlockImport for AuraBlockImport where I: BlockImport> + Send + Sync, I::Error: Into, @@ -412,18 +414,19 @@ impl BlockImport for AuraBlockImport: Send + 'static, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, new_cache: HashMap>, @@ -453,7 +456,7 @@ impl BlockImport for AuraBlockImport( S: sp_core::traits::SpawnEssentialNamed, CAW: CanAuthorWith + Send + Sync + 'static, { - register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.slot_duration())?; initialize_authorities_cache(&*client)?; let verifier = AuraVerifier::<_, P, _>::new( diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index cce58304d0d02..77dac0f754487 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -35,7 +35,6 @@ use std::{ }; use futures::prelude::*; -use parking_lot::Mutex; use log::{debug, trace}; use codec::{Encode, Decode, Codec}; @@ -75,7 +74,7 @@ pub use sc_consensus_slots::SlotProportion; type AuthorityId

=

::Public; /// Slot duration type for Aura. -pub type SlotDuration = sc_consensus_slots::SlotDuration; +pub type SlotDuration = sc_consensus_slots::SlotDuration; /// Get type of `SlotDuration` for Aura. pub fn slot_duration(client: &C) -> CResult where @@ -111,12 +110,12 @@ impl SlotCompatible for AuraSlotCompatible { fn extract_timestamp_and_slot( &self, data: &InherentData, - ) -> Result<(u64, AuraInherent, std::time::Duration), sp_consensus::Error> { + ) -> Result<(sp_timestamp::Timestamp, AuraInherent, std::time::Duration), sp_consensus::Error> { data.timestamp_inherent_data() .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (*x, y, Default::default())) + .map(|(x, y)| (x, y, Default::default())) } } @@ -161,7 +160,7 @@ pub fn start_aura( client, select_chain, block_import, - proposer_factory: env, + proposer_factory, sync_oracle, inherent_data_providers, force_authoring, @@ -187,22 +186,23 @@ pub fn start_aura( CAW: CanAuthorWith + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { - let worker = AuraWorker { + let worker = build_aura_worker::(BuildAuraWorkerParams { client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), - env, + block_import, + proposer_factory, keystore, sync_oracle: sync_oracle.clone(), force_authoring, backoff_authoring_blocks, telemetry, - _key_type: PhantomData::

, block_proposal_slot_portion, - }; + }); + register_aura_inherent_data_provider( &inherent_data_providers, slot_duration.slot_duration() )?; + Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _, _>( slot_duration, select_chain, @@ -214,9 +214,78 @@ pub fn start_aura( )) } +/// Parameters of [`build_aura_worker`]. +pub struct BuildAuraWorkerParams { + /// The client to interact with the chain. + pub client: Arc, + /// The block import. + pub block_import: I, + /// The proposer factory to build proposer instances. + pub proposer_factory: PF, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Should we force the authoring of blocks? + pub force_authoring: bool, + /// The backoff strategy when we miss slots. + pub backoff_authoring_blocks: Option, + /// The keystore used by the node. + pub keystore: SyncCryptoStorePtr, + /// The proportion of the slot dedicated to proposing. + /// + /// The block proposing will be limited to this proportion of the slot from the starting of the + /// slot. However, the proposing can still take longer when there is some lenience factor applied, + /// because there were no blocks produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, +} + +/// Build the aura worker. +/// +/// The caller is responsible for running this worker, otherwise it will do nothing. 
+pub fn build_aura_worker( + BuildAuraWorkerParams { + client, + block_import, + proposer_factory, + sync_oracle, + backoff_authoring_blocks, + keystore, + block_proposal_slot_portion, + telemetry, + force_authoring, + }: BuildAuraWorkerParams, +) -> impl sc_consensus_slots::SlotWorker>::Proof> where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, + C::Api: AuraApi>, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer>, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, + I: BlockImport> + Send + Sync + 'static, + Error: std::error::Error + Send + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, +{ + AuraWorker { + client, + block_import, + env: proposer_factory, + keystore, + sync_oracle, + force_authoring, + backoff_authoring_blocks, + telemetry, + _key_type: PhantomData::

, + block_proposal_slot_portion, + } +} + struct AuraWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, @@ -256,8 +325,8 @@ where "aura" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( @@ -477,7 +546,7 @@ fn find_pre_digest(header: &B::Header) -> Result Result<(), sp_consensus::Error> { if !inherent_data_providers.has_provider(&INHERENT_IDENTIFIER) { inherent_data_providers @@ -511,6 +580,7 @@ mod tests { use super::*; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, + import_queue::BoxJustificationImport, }; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; @@ -572,13 +642,17 @@ mod tests { const SLOT_DURATION: u64 = 1000; + type AuraVerifier = import_queue::AuraVerifier; + type AuraPeer = Peer<(), PeersClient>; + pub struct AuraTestNet { - peers: Vec>, + peers: Vec, } impl TestNetFactory for AuraTestNet { - type Verifier = import_queue::AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. 
fn from_config(_config: &ProtocolConfig) -> Self { @@ -596,10 +670,10 @@ mod tests { let inherent_data_providers = InherentDataProviders::new(); register_aura_inherent_data_provider( &inherent_data_providers, - slot_duration.get() + slot_duration.slot_duration() ).expect("Registers aura inherent data provider"); - assert_eq!(slot_duration.get(), SLOT_DURATION); + assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION); import_queue::AuraVerifier::new( client, inherent_data_providers, @@ -612,14 +686,22 @@ mod tests { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn make_block_import(&self, client: PeersClient) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut AuraPeer { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -665,7 +747,7 @@ mod tests { let inherent_data_providers = InherentDataProviders::new(); register_aura_inherent_data_provider( - &inherent_data_providers, slot_duration.get() + &inherent_data_providers, slot_duration.slot_duration() ).expect("Registers aura inherent data provider"); aura_futures.push(start_aura::(StartAuraParams { @@ -735,7 +817,7 @@ mod tests { let worker = AuraWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(client)), + block_import: client, env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), @@ -784,7 +866,7 @@ mod tests { let mut worker = AuraWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(client.clone())), + block_import: client.clone(), env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), @@ -801,7 +883,7 @@ mod tests { head, SlotInfo { slot: 0.into(), - timestamp: 0, + timestamp: 0.into(), ends_at: Instant::now() + Duration::from_secs(100), inherent_data: 
InherentData::new(), duration: Duration::from_millis(1000), diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 14d48fba1bb57..b04caeb3ee9d7 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -53,6 +53,7 @@ merlin = "2.0" pdqselect = "0.1.0" derive_more = "0.99.2" retain_mut = "0.1.2" +async-trait = "0.1.42" [dev-dependencies] sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } @@ -65,6 +66,3 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils sc-block-builder = { version = "0.9.0", path = "../../block-builder" } rand_chacha = "0.2.2" tempfile = "3.1.0" - -[features] -test-helpers = [] diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index ca14a764eece5..6696a65040a5e 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -217,7 +217,7 @@ fn epoch_data( SC: SelectChain, { let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_data_for_child_of( + epoch_changes.shared_data().epoch_data_for_child_of( descendent_query(&**client), &parent.hash(), parent.number().clone(), diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 7d5df77c92176..8b8804e3bfb02 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -18,8 +18,6 @@ //! Schema for BABE epoch changes in the aux-db. -use std::sync::Arc; -use parking_lot::Mutex; use log::info; use codec::{Decode, Encode}; @@ -79,18 +77,19 @@ pub fn load_epoch_changes( }, }; - let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { - info!(target: "babe", - "👶 Creating empty BABE epoch changes on what appears to be first startup." 
+ let epoch_changes = SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { + info!( + target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup.", ); EpochChangesFor::::default() - }))); + })); // rebalance the tree after deserialization. this isn't strictly necessary // since the tree is now rebalanced on every update operation. but since the // tree wasn't rebalanced initially it's useful to temporarily leave it here // to avoid having to wait until an import for rebalancing. - epoch_changes.lock().rebalance(); + epoch_changes.shared_data().rebalance(); Ok(epoch_changes) } @@ -189,7 +188,7 @@ mod test { ).unwrap(); assert!( - epoch_changes.lock() + epoch_changes.shared_data() .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) @@ -201,7 +200,7 @@ mod test { ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. write_epoch_changes::( - &epoch_changes.lock(), + &epoch_changes.shared_data(), |values| { client.insert_aux(values, &[]).unwrap(); }, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 3d72c436361c5..c3f1929c2ea8b 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -76,15 +76,15 @@ pub use sp_consensus_babe::{ pub use sp_consensus::SyncOracle; pub use sc_consensus_slots::SlotProportion; use std::{ - collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, - any::Any, borrow::Cow, convert::TryInto, + collections::HashMap, sync::Arc, u64, pin::Pin, borrow::Cow, convert::TryInto, + time::{Duration, Instant}, }; use sp_consensus::{ImportResult, CanAuthorWith, import_queue::BoxJustificationImport}; use sp_core::crypto::Public; use sp_application_crypto::AppKey; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, Justification, + generic::{BlockId, OpaqueDigestItemId}, Justifications, traits::{Block as BlockT, Header, DigestItemFor, Zero}, 
}; use sp_api::{ProvideRuntimeApi, NumberFor}; @@ -345,8 +345,8 @@ impl Config { } } - /// Get the inner slot duration, in milliseconds. - pub fn slot_duration(&self) -> u64 { + /// Get the inner slot duration + pub fn slot_duration(&self) -> Duration { self.0.slot_duration() } } @@ -438,7 +438,7 @@ pub fn start_babe(BabeParams { + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - CAW: CanAuthorWith + Send + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -448,7 +448,7 @@ pub fn start_babe(BabeParams { let worker = BabeSlotWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), + block_import, env, sync_oracle: sync_oracle.clone(), force_authoring, @@ -502,7 +502,7 @@ async fn answer_requests( match request { BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => { let lookup = || { - let epoch_changes = epoch_changes.lock(); + let epoch_changes = epoch_changes.shared_data(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*client), &parent_hash, @@ -605,7 +605,7 @@ type SlotNotificationSinks = Arc< struct BabeSlotWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, sync_oracle: SO, force_authoring: bool, @@ -647,8 +647,8 @@ where "babe" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( @@ -656,7 +656,7 @@ where parent: &B::Header, slot: Slot, ) -> Result { - self.epoch_changes.lock().epoch_descriptor_for_child_of( + self.epoch_changes.shared_data().epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), @@ -667,7 +667,8 @@ where } fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - 
self.epoch_changes.lock() + self.epoch_changes + .shared_data() .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .map(|epoch| epoch.as_ref().authorities.len()) } @@ -681,7 +682,7 @@ where debug!(target: "babe", "Attempting to claim slot {}", slot); let s = authorship::claim_slot( slot, - self.epoch_changes.lock().viable_epoch( + self.epoch_changes.shared_data().viable_epoch( &epoch_descriptor, |slot| Epoch::genesis(&self.config, slot) )?.as_ref(), @@ -768,7 +769,7 @@ where import_block.storage_changes = Some(storage_changes); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); Ok(import_block) @@ -919,13 +920,13 @@ impl SlotCompatible for TimeSource { fn extract_timestamp_and_slot( &self, data: &InherentData, - ) -> Result<(u64, Slot, std::time::Duration), sp_consensus::Error> { + ) -> Result<(sp_timestamp::Timestamp, Slot, std::time::Duration), sp_consensus::Error> { trace!(target: "babe", "extract timestamp"); data.timestamp_inherent_data() .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (*x, y, self.0.lock().0.take().unwrap_or_default())) + .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) } } @@ -1083,6 +1084,7 @@ where } } +#[async_trait::async_trait] impl Verifier for BabeVerifier where @@ -1093,19 +1095,19 @@ where SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: Block::Header, - justification: Option, + justifications: Option, mut body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { trace!( target: "babe", - "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", + "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", origin, header, - 
justification, + justifications, body, ); @@ -1125,7 +1127,7 @@ where .map_err(Error::::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent_hash, @@ -1189,15 +1191,16 @@ where self.telemetry; CONSENSUS_TRACE; "babe.checked_and_importing"; - "pre_header" => ?pre_header); + "pre_header" => ?pre_header, + ); let mut import_block = BlockImportParams::new(origin, pre_header); import_block.post_digests.push(verified_info.seal); import_block.body = body; - import_block.justification = justification; + import_block.justifications = justifications; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); import_block.post_hash = Some(hash); @@ -1220,7 +1223,7 @@ where /// Register the babe inherent data provider, if not registered already. pub fn register_babe_inherent_data_provider( inherent_data_providers: &InherentDataProviders, - slot_duration: u64, + slot_duration: Duration, ) -> Result<(), sp_consensus::Error> { debug!(target: "babe", "Registering"); if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { @@ -1275,6 +1278,7 @@ impl BabeBlockImport { } } +#[async_trait::async_trait] impl BlockImport for BabeBlockImport where Block: BlockT, Inner: BlockImport> + Send + Sync, @@ -1286,7 +1290,7 @@ impl BlockImport for BabeBlockImport; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -1328,202 +1332,209 @@ impl BlockImport for BabeBlockImport::ParentBlockNoAssociatedWeight(hash)).into() ))? 
- }; + }; - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; + let intermediate = block.take_intermediate::>( + INTERMEDIATE_KEY + )?; - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - (epoch_descriptor, first_in_epoch, parent_weight) - }; + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) + }; - let total_weight = parent_weight + pre_digest.added_weight(); - - // search for this all the time so we can reject unexpected announcements. - let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let next_config_digest = find_next_config_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { - (true, true, _) => {}, - (false, false, false) => {}, - (false, false, true) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedConfigChange).into(), + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let next_config_digest = find_next_config_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { + (true, true, _) => {}, + (false, false, false) => {}, + (false, false, true) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedConfigChange).into(), + ) ) - ) - }, - (true, false, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + }, + (true, false, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + ) ) - ) - }, - (false, true, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedEpochChange).into(), + }, + (false, true, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedEpochChange).into(), + ) ) - ) - }, - } + }, + } - // if there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; + let info = self.client.info(); - let info = self.client.info(); + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some(epoch_changes.clone()); + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + ).ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; + let epoch_config = 
next_config_digest.map(Into::into).unwrap_or_else( + || viable_epoch.as_ref().config.clone() + ); - let epoch_config = next_config_digest.map(Into::into).unwrap_or_else( - || viable_epoch.as_ref().config.clone() - ); + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); - log!(target: "babe", - log_level, - "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); + let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); - let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); + log!(target: "babe", + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); - log!(target: "babe", - log_level, - "👶 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized( + self.client.clone(), + &mut epoch_changes, + )?; - // prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. - let prune_and_import = || { - prune_finalized( - self.client.clone(), - &mut epoch_changes, - )?; + epoch_changes.import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; - epoch_changes.import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + Ok(()) + }; - Ok(()) - }; + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e); + } - if let Err(e) = prune_and_import() { - debug!(target: "babe", "Failed to launch next epoch: {:?}", e); - *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e); + crate::aux_schema::write_epoch_changes::( + &*epoch_changes, + |insert| block.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); } - crate::aux_schema::write_epoch_changes::( - &*epoch_changes, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) + aux_schema::write_block_weight( + hash, + total_weight, + |values| block.auxiliary.extend( + values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ), ); - } - - aux_schema::write_block_weight( - hash, - total_weight, - |values| block.auxiliary.extend( - 
values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ), - ); - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? .ok_or_else( || ConsensusError::ChainLookup("No block weight for parent header.".to_string()) )? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) }; - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) + // Release the mutex, but it stays locked + epoch_changes.release_mutex() }; - let import_result = self.inner.import_block(block, new_cache); + let import_result = self.inner.import_block(block, new_cache).await; // revert to the original epoch changes in case there's an error // importing the block if import_result.is_err() { if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes = old_epoch_changes; + *epoch_changes.upgrade() = old_epoch_changes; } } import_result.map_err(Into::into) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } } @@ -1583,7 +1594,7 @@ pub fn block_import( // startup rather than waiting until importing the next epoch change block. prune_finalized( client.clone(), - &mut epoch_changes.lock(), + &mut epoch_changes.shared_data(), )?; let import = BabeBlockImport::new( @@ -1626,7 +1637,7 @@ pub fn import_queue( SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, { - register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; + register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration())?; let verifier = BabeVerifier { select_chain, @@ -1647,41 +1658,3 @@ pub fn import_queue( registry, )) } - -/// BABE test helpers. Utility methods for manually authoring blocks. 
-#[cfg(feature = "test-helpers")] -pub mod test_helpers { - use super::*; - - /// Try to claim the given slot and return a `BabePreDigest` if - /// successful. - pub fn claim_slot( - slot: Slot, - parent: &B::Header, - client: &C, - keystore: SyncCryptoStorePtr, - link: &BabeLink, - ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - { - let epoch_changes = link.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(client), - &parent.hash(), - parent.number().clone(), - slot, - |slot| Epoch::genesis(&link.config, slot), - ).unwrap().unwrap(); - - authorship::claim_slot( - slot, - &epoch, - &keystore, - ).map(|(digest, _)| digest) - } -} diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index d3e51b020326c..839d38b94a933 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -47,6 +47,7 @@ use rand_chacha::{ }; use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::BABE; +use futures::executor::block_on; type Item = DigestItem; @@ -67,6 +68,9 @@ enum Stage { type Mutator = Arc; +type BabeBlockImport = + PanickingBlockImport>>; + #[derive(Clone)] struct DummyFactory { client: Arc, @@ -134,7 +138,7 @@ impl DummyProposer { // figure out if we should add a consensus digest, since the test runtime // doesn't. - let epoch_changes = self.factory.epoch_changes.lock(); + let epoch_changes = self.factory.epoch_changes.shared_data(); let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(&*self.factory.client), &self.parent_hash, @@ -188,30 +192,37 @@ thread_local! 
{ } #[derive(Clone)] -struct PanickingBlockImport(B); - -impl> BlockImport for PanickingBlockImport { +pub struct PanickingBlockImport(B); + +#[async_trait::async_trait] +impl> BlockImport for PanickingBlockImport + where + B::Transaction: Send, + B: Send, +{ type Error = B::Error; type Transaction = B::Transaction; - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, new_cache: HashMap>, ) -> Result { - Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + Ok(self.0.import_block(block, new_cache).await.expect("importing block failed")) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - Ok(self.0.check_block(block).expect("checking block failed")) + Ok(self.0.check_block(block).await.expect("checking block failed")) } } +type BabePeer = Peer, BabeBlockImport>; + pub struct BabeTestNet { - peers: Vec>>, + peers: Vec, } type TestHeader = ::Header; @@ -227,20 +238,21 @@ pub struct TestVerifier { mutator: Mutator, } +#[async_trait::async_trait] impl Verifier for TestVerifier { /// Verify the given data and return the BlockImportParams and an optional /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. - fn verify( + async fn verify( &mut self, origin: BlockOrigin, mut header: TestHeader, - justification: Option, + justifications: Option, body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { // apply post-sealing mutations (i.e. stripping seal, if desired). (self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(origin, header, justification, body) + self.inner.verify(dbg!(origin), header, justifications, body).await } } @@ -255,6 +267,7 @@ pub struct PeerData { impl TestNetFactory for BabeTestNet { type Verifier = TestVerifier; type PeerData = Option; + type BlockImport = BabeBlockImport; /// Create new test network with peers and given config. 
fn from_config(_config: &ProtocolConfig) -> Self { @@ -264,9 +277,9 @@ impl TestNetFactory for BabeTestNet { } } - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, Option, ) @@ -287,7 +300,7 @@ impl TestNetFactory for BabeTestNet { Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) ); ( - BlockImportAdapter::new_full(block_import), + BlockImportAdapter::new(block_import), None, Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), ) @@ -326,17 +339,17 @@ impl TestNetFactory for BabeTestNet { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut BabePeer { trace!(target: "babe", "Retrieving a peer"); &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { trace!(target: "babe", "Retrieving peers"); &self.peers } - fn mut_peers>)>( + fn mut_peers)>( &mut self, closure: F, ) { @@ -436,7 +449,7 @@ fn run_one_test( telemetry: None, }).expect("Starts babe")); } - futures::executor::block_on(future::select( + block_on(future::select( futures::future::poll_fn(move |cx| { let mut net = net.lock(); net.poll(cx); @@ -567,7 +580,7 @@ fn can_author_block() { } // Propose and import a new BABE block on top of the given parent. 
-fn propose_and_import_block( +fn propose_and_import_block( parent: &TestHeader, slot: Option, proposer_factory: &mut DummyFactory, @@ -595,7 +608,7 @@ fn propose_and_import_block( let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( + let epoch_descriptor = proposer_factory.epoch_changes.shared_data().epoch_descriptor_for_child_of( descendent_query(&*proposer_factory.client), &parent_hash, *parent.number(), @@ -623,10 +636,10 @@ fn propose_and_import_block( import.body = Some(block.extrinsics); import.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - let import_result = block_import.import_block(import, Default::default()).unwrap(); + let import_result = block_on(block_import.import_block(import, Default::default())).unwrap(); match import_result { ImportResult::Imported(_) => {}, @@ -664,7 +677,7 @@ fn importing_block_one_sets_genesis_epoch() { let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); - let epoch_changes = data.link.epoch_changes.lock(); + let epoch_changes = data.link.epoch_changes.shared_data(); let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( descendent_query(&*client), &block_hash, @@ -739,13 +752,13 @@ fn importing_epoch_change_block_prunes_tree() { // We should be tracking a total of 9 epochs in the fork tree assert_eq!( - epoch_changes.lock().tree().iter().count(), + epoch_changes.shared_data().tree().iter().count(), 9, ); // And only one root assert_eq!( - epoch_changes.lock().tree().roots().count(), + epoch_changes.shared_data().tree().roots().count(), 1, ); @@ -756,16 +769,16 @@ fn importing_epoch_change_block_prunes_tree() { // at this point no hashes from the first fork must exist on the 
tree assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), + !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), ); // but the epoch changes from the other forks must still exist assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) ); assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), ); // finalizing block #25 from the canon chain should prune out the second fork @@ -774,12 +787,12 @@ fn importing_epoch_change_block_prunes_tree() { // at this point no hashes from the second fork must exist on the tree assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), + !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), ); // while epoch changes from the last fork should still exist assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), ); } diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 41c42866e7272..5762b9c998b67 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -17,3 +17,4 @@ sc-client-api = { version = "3.0.0", path = "../../api" } sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +parking_lot = "0.11.1" diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index a53517c5c35ea..9b4d705769196 100644 --- 
a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -17,6 +17,8 @@ // along with this program. If not, see . //! Collection of common consensus specific implementations + mod longest_chain; +pub mod shared_data; pub use longest_chain::LongestChain; diff --git a/client/consensus/common/src/shared_data.rs b/client/consensus/common/src/shared_data.rs new file mode 100644 index 0000000000000..d90fc6273e056 --- /dev/null +++ b/client/consensus/common/src/shared_data.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Provides a generic wrapper around shared data. See [`SharedData`] for more information. + +use std::sync::Arc; +use parking_lot::{Mutex, MappedMutexGuard, Condvar, MutexGuard}; + +/// Created by [`SharedDataLocked::release_mutex`]. +/// +/// As long as the object isn't dropped, the shared data is locked. It is advised to drop this +/// object when the shared data doesn't need to be locked anymore. To get access to the shared data +/// [`Self::upgrade`] is provided. 
+#[must_use = "Shared data will be unlocked on drop!"] +pub struct SharedDataLockedUpgradable { + shared_data: SharedData, +} + +impl SharedDataLockedUpgradable { + /// Upgrade to a *real* mutex guard that will give access to the inner data. + /// + /// Every call to this function will reacquire the mutex again. + pub fn upgrade(&mut self) -> MappedMutexGuard { + MutexGuard::map(self.shared_data.inner.lock(), |i| &mut i.shared_data) + } +} + +impl Drop for SharedDataLockedUpgradable { + fn drop(&mut self) { + let mut inner = self.shared_data.inner.lock(); + // It should not be locked anymore + inner.locked = false; + + // Notify all waiting threads. + self.shared_data.cond_var.notify_all(); + } +} + +/// Created by [`SharedData::shared_data_locked`]. + /// + /// As long as this object isn't dropped, the shared data is held in a mutex guard and the shared + /// data is tagged as locked. Access to the shared data is provided through [`Deref`] and + /// [`DerefMut`]. The trick is to use [`Self::release_mutex`] to release the mutex, but still keep + /// the shared data locked. This means every other thread trying to access the shared data in this + /// time will need to wait until this lock is freed. + /// + /// If this object is dropped without calling [`Self::release_mutex`], the lock will be dropped + /// immediately. +#[must_use = "Shared data will be unlocked on drop!"] +pub struct SharedDataLocked<'a, T> { + /// The current active mutex guard holding the inner data. + inner: MutexGuard<'a, SharedDataInner>, + /// The [`SharedData`] instance that created this instance. + /// + /// This instance is only taken on drop or when calling [`Self::release_mutex`]. + shared_data: Option>, +} + +impl<'a, T> SharedDataLocked<'a, T> { + /// Release the mutex, but keep the shared data locked.
+ pub fn release_mutex(mut self) -> SharedDataLockedUpgradable { + SharedDataLockedUpgradable { + shared_data: self.shared_data.take() + .expect("`shared_data` is only taken on drop; qed"), + } + } +} + +impl<'a, T> Drop for SharedDataLocked<'a, T> { + fn drop(&mut self) { + if let Some(shared_data) = self.shared_data.take() { + // If the `shared_data` is still set, it means [`Self::release_mutex`] wasn't + // called and the lock should be released. + self.inner.locked = false; + + // Notify all waiting threads about the released lock. + shared_data.cond_var.notify_all(); + } + } +} + +impl<'a, T> std::ops::Deref for SharedDataLocked<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.inner.shared_data + } +} + +impl<'a, T> std::ops::DerefMut for SharedDataLocked<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner.shared_data + } +} + +/// Holds the shared data and if the shared data is currently locked. +/// +/// For more information see [`SharedData`]. +struct SharedDataInner { + /// The actual shared data that is protected here against concurrent access. + shared_data: T, + /// Is `shared_data` currently locked and can not be accessed? + locked: bool, +} + +/// Some shared data that provides support for locking this shared data for some time. +/// +/// When working with consensus engines there is often data that needs to be shared between multiple +/// parts of the system, like block production and block import. This struct provides an abstraction +/// for this shared data in a generic way. +/// +/// The pain point when sharing this data is often the usage of mutex guards in an async context as +/// this doesn't work for most of them as these guards don't implement `Send`. This abstraction +/// provides a way to lock the shared data, while not having the mutex locked. So, the data stays +/// locked and we are still able to hold this lock over an `await` call. 
+/// +/// # Example +/// +/// ``` +///# use sc_consensus::shared_data::SharedData; +/// +/// let shared_data = SharedData::new(String::from("hello world")); +/// +/// let lock = shared_data.shared_data_locked(); +/// +/// let shared_data2 = shared_data.clone(); +/// let join_handle1 = std::thread::spawn(move || { +/// // This will need to wait for the outer lock to be released before it can access the data. +/// shared_data2.shared_data().push_str("1"); +/// }); +/// +/// assert_eq!(*lock, "hello world"); +/// +/// // Let us release the mutex, but we still keep it locked. +/// // Now we could call `await` for example. +/// let mut lock = lock.release_mutex(); +/// +/// let shared_data2 = shared_data.clone(); +/// let join_handle2 = std::thread::spawn(move || { +/// shared_data2.shared_data().push_str("2"); +/// }); +/// +/// // We still have the lock and can upgrade it to access the data. +/// assert_eq!(*lock.upgrade(), "hello world"); +/// lock.upgrade().push_str("3"); +/// +/// drop(lock); +/// join_handle1.join().unwrap(); +/// join_handle2.join().unwrap(); +/// +/// let data = shared_data.shared_data(); +/// // As we don't know the order of the threads, we need to check for both combinations +/// assert!(*data == "hello world321" || *data == "hello world312"); +/// ``` +pub struct SharedData { + inner: Arc>>, + cond_var: Arc, +} + +impl Clone for SharedData { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + cond_var: self.cond_var.clone(), + } + } +} + +impl SharedData { + /// Create a new instance of [`SharedData`] to share the given `shared_data`. + pub fn new(shared_data: T) -> Self { + Self { + inner: Arc::new(Mutex::new(SharedDataInner { shared_data, locked: false })), + cond_var: Default::default(), + } + } + + /// Acquire access to the shared data. + /// + /// This will give mutable access to the shared data. After the returned mutex guard is dropped, + /// the shared data is accessible by other threads. 
So, this function should be used when + /// reading/writing of the shared data in a local context is required. + /// + /// When requiring to lock shared data for some longer time, even with temporarily releasing the + /// lock, [`Self::shared_data_locked`] should be used. + pub fn shared_data(&self) -> MappedMutexGuard { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + + MutexGuard::map(guard, |i| &mut i.shared_data) + } + + /// Acquire access to the shared data and lock it. + /// + /// This will give mutable access to the shared data. The returned [`SharedDataLocked`] + /// provides the function [`SharedDataLocked::release_mutex`] to release the mutex, but + /// keeping the data locked. This is useful in async contexts for example where the data needs to + /// be locked, but a mutex guard can not be held. + /// + /// For an example see [`SharedData`]. + pub fn shared_data_locked(&self) -> SharedDataLocked { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + guard.locked = true; + + SharedDataLocked { + inner: guard, + shared_data: Some(self.clone()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn shared_data_locking_works() { + const THREADS: u32 = 100; + let shared_data = SharedData::new(0u32); + + let lock = shared_data.shared_data_locked(); + + for i in 0..THREADS { + let data = shared_data.clone(); + std::thread::spawn(move || { + if i % 2 == 1 { + *data.shared_data() += 1; + } else { + let mut lock = data.shared_data_locked().release_mutex(); + // Give the other threads some time to wake up + std::thread::sleep(std::time::Duration::from_millis(10)); + *lock.upgrade() += 1; + } + }); + } + + let lock = lock.release_mutex(); + std::thread::sleep(std::time::Duration::from_millis(100)); + drop(lock); + + while *shared_data.shared_data() < THREADS { + 
std::thread::sleep(std::time::Duration::from_millis(100)); + } + } +} diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index bebe6979e694e..8e2fe77100967 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } -parking_lot = "0.11.1" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } sp-runtime = { path = "../../../primitives/runtime" , version = "3.0.0"} sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sc-client-api = { path = "../../api" , version = "3.0.0"} +sc-consensus = { path = "../common" , version = "0.9.0"} diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 5c5ef446993a2..98a3e83530510 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -20,8 +20,7 @@ pub mod migration; -use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; -use parking_lot::Mutex; +use std::{ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; use codec::{Encode, Decode}; use fork_tree::ForkTree; use sc_client_api::utils::is_descendent_of; @@ -645,10 +644,12 @@ impl EpochChanges where } /// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; +pub type EpochChangesFor = + EpochChanges<::Hash, NumberFor, Epoch>; /// A shared epoch changes tree. 
-pub type SharedEpochChanges = Arc>>; +pub type SharedEpochChanges = + sc_consensus::shared_data::SharedData>; #[cfg(test)] mod tests { diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 679fd5a3eb388..32cc89034fb1d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -23,6 +23,7 @@ parking_lot = "0.11.1" codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" +async-trait = "0.1.42" sc-client-api = { path = "../../api", version = "3.0.0"} sc-consensus-babe = { path = "../../consensus/babe", version = "0.9.0"} diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 7fe51c7b79ce5..d627ea2a25c3a 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -21,12 +21,7 @@ use super::ConsensusDataProvider; use crate::Error; use codec::Encode; -use std::{ - any::Any, - borrow::Cow, - sync::{Arc, atomic}, - time::SystemTime, -}; +use std::{borrow::Cow, sync::{Arc, atomic}, time::SystemTime}; use sc_client_api::AuxStore; use sc_consensus_babe::{ Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, @@ -90,7 +85,7 @@ impl BabeConsensusDataProvider let timestamp_provider = SlotTimestampProvider::new(client.clone())?; provider.register_provider(timestamp_provider)?; - register_babe_inherent_data_provider(provider, config.slot_duration)?; + register_babe_inherent_data_provider(provider, config.slot_duration())?; Ok(Self { config, @@ -102,7 +97,7 @@ impl BabeConsensusDataProvider } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -156,7 +151,7 
@@ impl ConsensusDataProvider for BabeConsensusDataProvider authority_index: 0_u32, }); - let mut epoch_changes = self.epoch_changes.lock(); + let mut epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -200,7 +195,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider inherents: &InherentData ) -> Result<(), Error> { let slot = inherents.babe_inherent_data()?; - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let mut epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -239,7 +234,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); Ok(()) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 64de70939503c..a5351c63bc3b4 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -27,7 +27,7 @@ use sp_consensus::{ }; use sp_blockchain::HeaderBackend; use sp_inherents::InherentDataProviders; -use sp_runtime::{traits::Block as BlockT, Justification}; +use sp_runtime::{traits::Block as BlockT, Justifications, ConsensusEngineId}; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; use sc_transaction_pool::txpool; use std::{sync::Arc, marker::PhantomData}; @@ -49,19 +49,23 @@ pub use self::{ }; use sp_api::{ProvideRuntimeApi, TransactionFor}; +/// The `ConsensusEngineId` of Manual Seal. +pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; + /// The verifier for the manual seal engine; instantly finalizes. 
struct ManualSealVerifier; +#[async_trait::async_trait] impl Verifier for ManualSealVerifier { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { let mut import_params = BlockImportParams::new(origin, header); - import_params.justification = justification; + import_params.justifications = justifications; import_params.body = body; import_params.finalized = false; import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -193,6 +197,7 @@ pub async fn run_manual_seal( ).await; } EngineCommand::FinalizeBlock { hash, sender, justification } => { + let justification = justification.map(|j| (MANUAL_SEAL_ENGINE_ID, j)); finalize_block( FinalizeBlockParams { hash, diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index 293d4487a5d59..eb056f22fed8b 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -28,7 +28,7 @@ use futures::{ SinkExt }; use serde::{Deserialize, Serialize}; -use sp_runtime::Justification; +use sp_runtime::EncodedJustification; pub use self::gen_client::Client as ManualSealClient; /// Future's type for jsonrpc @@ -62,7 +62,7 @@ pub enum EngineCommand { /// sender to report errors/success to the rpc. 
sender: Sender<()>, /// finalization justification - justification: Option, + justification: Option, } } @@ -83,7 +83,7 @@ pub trait ManualSealApi { fn finalize_block( &self, hash: Hash, - justification: Option + justification: Option ) -> FutureResult; } @@ -131,7 +131,7 @@ impl ManualSealApi for ManualSeal { Box::new(future.map_err(Error::from).compat()) } - fn finalize_block(&self, hash: Hash, justification: Option) -> FutureResult { + fn finalize_block(&self, hash: Hash, justification: Option) -> FutureResult { let mut sink = self.import_block_channel.clone(); let future = async move { let (sender, receiver) = oneshot::channel(); diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 2176973f3a298..23a560cebd54b 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -144,7 +144,7 @@ pub async fn seal_block( digest_provider.append_block_import(&parent, &mut params, &id)?; } - match block_import.import_block(params, HashMap::new())? { + match block_import.import_block(params, HashMap::new()).await? 
{ ImportResult::Imported(aux) => { Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) }, diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 8be43a8fa04bc..86b0b1df54e26 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -30,3 +30,4 @@ parking_lot = "0.11.1" sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } derive_more = "0.99.2" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +async-trait = "0.1.42" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 482bc80170fe8..ea2e30afdc485 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -36,7 +36,7 @@ mod worker; pub use crate::worker::{MiningWorker, MiningMetadata, MiningBuild}; use std::{ - sync::Arc, any::Any, borrow::Cow, collections::HashMap, marker::PhantomData, + sync::Arc, borrow::Cow, collections::HashMap, marker::PhantomData, cmp::Ordering, time::Duration, }; use futures::{prelude::*, future::Either}; @@ -44,7 +44,7 @@ use parking_lot::Mutex; use sc_client_api::{BlockOf, backend::AuxStore, BlockchainEvents}; use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{Justification, RuntimeString}; +use sp_runtime::{Justifications, RuntimeString}; use sp_runtime::generic::{BlockId, Digest, DigestItem}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_api::ProvideRuntimeApi; @@ -307,6 +307,7 @@ impl PowBlockImport wher } } +#[async_trait::async_trait] impl BlockImport for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -314,21 +315,21 @@ impl BlockImport for PowBlockImport, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 
'static, - CAW: CanAuthorWith, + Algorithm: PowAlgorithm + Send, + Algorithm::Difficulty: 'static + Send, + CAW: CanAuthorWith + Send, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -403,7 +404,7 @@ impl BlockImport for PowBlockImport PowVerifier { } } +#[async_trait::async_trait] impl Verifier for PowVerifier where Algorithm: PowAlgorithm + Send + Sync, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { let hash = header.hash(); @@ -470,10 +472,10 @@ impl Verifier for PowVerifier where let mut import_block = BlockImportParams::new(origin, checked_header); import_block.post_digests.push(seal); import_block.body = body; - import_block.justification = justification; + import_block.justifications = justifications; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box + Box::new(intermediate) as Box<_>, ); import_block.post_hash = Some(hash); @@ -513,6 +515,7 @@ pub fn import_queue( B: BlockT, Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, + Algorithm::Difficulty: Send, { register_pow_inherent_data_provider(&inherent_data_providers)?; @@ -556,7 +559,7 @@ pub fn start_mining_worker( C: ProvideRuntimeApi + BlockchainEvents + 'static, S: SelectChain + 'static, Algorithm: PowAlgorithm + Clone, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: Send + 'static, E: Environment + Send + Sync + 'static, E::Error: 
std::fmt::Debug, E::Proposer: Proposer>, diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index d64596e48cf1a..18844e51ce418 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{pin::Pin, time::Duration, collections::HashMap, any::Any, borrow::Cow}; +use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; use sc_client_api::ImportNotifications; use sp_runtime::{DigestItem, traits::Block as BlockT, generic::BlockId}; use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; @@ -68,7 +68,8 @@ impl MiningWorker where Block: BlockT, C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, + sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. `None` if the worker has just started or the client is doing /// major syncing. @@ -94,7 +95,7 @@ impl MiningWorker where /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. 
- pub fn submit(&mut self, seal: Seal) -> bool { + pub async fn submit(&mut self, seal: Seal) -> bool { if let Some(build) = self.build.take() { match self.algorithm.verify( &BlockId::Hash(build.metadata.best_hash), @@ -135,10 +136,10 @@ impl MiningWorker where import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box + Box::new(intermediate) as Box<_>, ); - match self.block_import.import_block(import_block, HashMap::default()) { + match self.block_import.import_block(import_block, HashMap::default()).await { Ok(_) => { info!( target: "pow", diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 7ca413630e26e..64beea50fcf63 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -28,11 +28,12 @@ sp-api = { version = "3.0.0", path = "../../../primitives/api" } sc-telemetry = { version = "3.0.0", path = "../../telemetry" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } futures = "0.3.9" futures-timer = "3.0.1" -parking_lot = "0.11.1" log = "0.4.11" thiserror = "1.0.21" +async-trait = "0.1.42" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 037402260c0d3..c1f13fea1f9ef 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -32,12 +32,11 @@ pub use slots::SlotInfo; use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::Duration}; +use std::{fmt::Debug, ops::Deref, time::Duration}; use codec::{Decode, Encode}; -use futures::{prelude::*, future::{self, Either}}; +use 
futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; use log::{debug, error, info, warn}; -use parking_lot::Mutex; use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData}; @@ -68,21 +67,23 @@ pub struct SlotResult { /// /// The implementation should not make any assumptions of the slot being bound to the time or /// similar. The only valid assumption is that the slot number is always increasing. +#[async_trait::async_trait] pub trait SlotWorker { /// Called when a new slot is triggered. /// /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in /// the slot. Otherwise `None` is returned. - fn on_slot( + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>> + Send>>; + ) -> Option>; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at /// its beginning and tries to produce a block if successfully claimed, timing /// out if block production takes too long. +#[async_trait::async_trait] pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. type BlockImport: BlockImport>::Transaction> @@ -96,7 +97,7 @@ pub trait SimpleSlotWorker { + Send + Unpin + 'static; /// The type of proposer to use to build blocks. - type Proposer: Proposer; + type Proposer: Proposer + Send; /// Data associated with a slot claim. type Claim: Send + 'static; @@ -108,7 +109,7 @@ pub trait SimpleSlotWorker { fn logging_target(&self) -> &'static str; /// A handle to a `BlockImport`. - fn block_import(&self) -> Arc>; + fn block_import(&mut self) -> &mut Self::BlockImport; /// Returns the epoch data necessary for authoring. For time-dependent epochs, /// use the provided slot number as a canonical source of time. @@ -191,36 +192,38 @@ pub trait SimpleSlotWorker { ) -> Duration; /// Implements [`SlotWorker::on_slot`]. 
- fn on_slot( + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>::Proof>>> + Send>> - where - >::Proposal: Unpin + Send + 'static, - { + ) -> Option>::Proof>> { let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); let telemetry = self.telemetry(); + let logging_target = self.logging_target(); let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); let proposing_remaining = if proposing_remaining_duration == Duration::default() { debug!( - target: self.logging_target(), + target: logging_target, "Skipping proposal slot {} since there's no time left to propose", slot, ); - return Box::pin(future::ready(None)); + return None } else { - Box::new(Delay::new(proposing_remaining_duration)) - as Box + Unpin + Send> + Delay::new(proposing_remaining_duration) }; let epoch_data = match self.epoch_data(&chain_head, slot) { Ok(epoch_data) => epoch_data, Err(err) => { - warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); + warn!( + target: logging_target, + "Unable to fetch epoch data at block {:?}: {:?}", + chain_head.hash(), + err, + ); telemetry!( telemetry; @@ -230,7 +233,7 @@ pub trait SimpleSlotWorker { "err" => ?err, ); - return Box::pin(future::ready(None)); + return None; } }; @@ -242,7 +245,7 @@ pub trait SimpleSlotWorker { self.sync_oracle().is_offline() && authorities_len.map(|a| a > 1).unwrap_or(false) { - debug!(target: self.logging_target(), "Skipping proposal slot. Waiting for the network."); + debug!(target: logging_target, "Skipping proposal slot. 
Waiting for the network."); telemetry!( telemetry; CONSENSUS_DEBUG; @@ -250,16 +253,16 @@ pub trait SimpleSlotWorker { "authorities_len" => authorities_len, ); - return Box::pin(future::ready(None)); + return None; } let claim = match self.claim_slot(&chain_head, slot, &epoch_data) { - None => return Box::pin(future::ready(None)), + None => return None, Some(claim) => claim, }; if self.should_backoff(slot, &chain_head) { - return Box::pin(future::ready(None)); + return None; } debug!( @@ -274,13 +277,18 @@ pub trait SimpleSlotWorker { CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => *slot, - "timestamp" => timestamp, + "timestamp" => *timestamp, ); - let awaiting_proposer = { - let telemetry = telemetry.clone(); - self.proposer(&chain_head).map_err(move |err| { - warn!("Unable to author block in slot {:?}: {:?}", slot, err); + let proposer = match self.proposer(&chain_head).await { + Ok(p) => p, + Err(err) => { + warn!( + target: logging_target, + "Unable to author block in slot {:?}: {:?}", + slot, + err, + ); telemetry!( telemetry; @@ -290,8 +298,8 @@ pub trait SimpleSlotWorker { "err" => ?err ); - err - }) + return None + } }; let logs = self.pre_digest_data(slot, &claim); @@ -299,106 +307,127 @@ pub trait SimpleSlotWorker { // deadline our production to 98% of the total time left for proposing. As we deadline // the proposing below to the same total time left, the 2% margin should be enough for // the result to be returned. 
- let proposing = awaiting_proposer.and_then(move |proposer| proposer.propose( + let proposing = proposer.propose( slot_info.inherent_data, sp_runtime::generic::Digest { logs, }, proposing_remaining_duration.mul_f32(0.98), - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); - - let proposal_work = { - let telemetry = telemetry.clone(); - futures::future::select(proposing, proposing_remaining).map(move |v| match v { - Either::Left((b, _)) => b.map(|b| (b, claim)), - Either::Right(_) => { - info!( - "⌛️ Discarding proposal for slot {}; block production took too long", - slot, - ); - // If the node was compiled with debug, tell the user to use release optimizations. - #[cfg(build_type="debug")] - info!("👉 Recompile your node in `--release` mode to mitigate this problem."); - telemetry!( - telemetry; - CONSENSUS_INFO; - "slots.discarding_proposal_took_too_long"; - "slot" => *slot, - ); + ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); - Err(sp_consensus::Error::ClientImport("Timeout in the Slots proposer".into())) - }, - }) + let proposal = match futures::future::select(proposing, proposing_remaining).await { + Either::Left((Ok(p), _)) => p, + Either::Left((Err(err), _)) => { + warn!( + target: logging_target, + "Proposing failed: {:?}", + err, + ); + + return None + }, + Either::Right(_) => { + info!( + target: logging_target, + "⌛️ Discarding proposal for slot {}; block production took too long", + slot, + ); + // If the node was compiled with debug, tell the user to use release optimizations. 
+ #[cfg(build_type="debug")] + info!( + target: logging_target, + "👉 Recompile your node in `--release` mode to mitigate this problem.", + ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.discarding_proposal_took_too_long"; + "slot" => *slot, + ); + + return None + }, }; let block_import_params_maker = self.block_import_params(); let block_import = self.block_import(); - let logging_target = self.logging_target(); - proposal_work.and_then(move |(proposal, claim)| async move { - let (block, storage_proof) = (proposal.block, proposal.proof); - let (header, body) = block.deconstruct(); - let header_num = *header.number(); - let header_hash = header.hash(); - let parent_hash = *header.parent_hash(); - - let block_import_params = block_import_params_maker( - header, - &header_hash, - body.clone(), - proposal.storage_changes, - claim, - epoch_data, - )?; - - info!( - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - header_num, - block_import_params.post_hash(), - header_hash, - ); - - telemetry!( - telemetry; - CONSENSUS_INFO; - "slots.pre_sealed_block"; - "header_num" => ?header_num, - "hash_now" => ?block_import_params.post_hash(), - "hash_previously" => ?header_hash, - ); - - let header = block_import_params.post_header(); - if let Err(err) = block_import.lock().import_block(block_import_params, Default::default()) { + let (block, storage_proof) = (proposal.block, proposal.proof); + let (header, body) = block.deconstruct(); + let header_num = *header.number(); + let header_hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let block_import_params = match block_import_params_maker( + header, + &header_hash, + body.clone(), + proposal.storage_changes, + claim, + epoch_data, + ) { + Ok(bi) => bi, + Err(err) => { warn!( target: logging_target, - "Error with block built on {:?}: {:?}", - parent_hash, + "Failed to create block import params: {:?}", err, ); - telemetry!( - telemetry; - CONSENSUS_WARN; - 
"slots.err_with_block_built_on"; - "hash" => ?parent_hash, - "err" => ?err, - ); + return None } + }; - Ok(SlotResult { block: B::new(header, body), storage_proof }) - }).then(|r| async move { - r.map_err(|e| warn!(target: "slots", "Encountered consensus error: {:?}", e)).ok() - }).boxed() + info!( + target: logging_target, + "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + header_num, + block_import_params.post_hash(), + header_hash, + ); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.pre_sealed_block"; + "header_num" => ?header_num, + "hash_now" => ?block_import_params.post_hash(), + "hash_previously" => ?header_hash, + ); + + let header = block_import_params.post_header(); + if let Err(err) = block_import + .import_block(block_import_params, Default::default()) + .await + { + warn!( + target: logging_target, + "Error with block built on {:?}: {:?}", + parent_hash, + err, + ); + + telemetry!( + telemetry; + CONSENSUS_WARN; + "slots.err_with_block_built_on"; + "hash" => ?parent_hash, + "err" => ?err, + ); + } + + Some(SlotResult { block: B::new(header, body), storage_proof }) } } -impl> SlotWorker>::Proof> for T { - fn on_slot( +#[async_trait::async_trait] +impl + Send> SlotWorker>::Proof> for T { + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>::Proof>>> + Send>> { - SimpleSlotWorker::on_slot(self, chain_head, slot_info) + ) -> Option>::Proof>> { + SimpleSlotWorker::on_slot(self, chain_head, slot_info).await } } @@ -408,7 +437,7 @@ pub trait SlotCompatible { fn extract_timestamp_and_slot( &self, inherent: &InherentData, - ) -> Result<(u64, Slot, std::time::Duration), sp_consensus::Error>; + ) -> Result<(sp_timestamp::Timestamp, Slot, std::time::Duration), sp_consensus::Error>; } /// Start a new slot worker. 
@@ -436,25 +465,39 @@ where let SlotDuration(slot_duration) = slot_duration; // rather than use a timer interval, we schedule our waits ourselves - Slots::::new( + let mut slots = Slots::::new( slot_duration.slot_duration(), inherent_data_providers, timestamp_extractor, - ).inspect_err(|e| debug!(target: "slots", "Faulty timer: {:?}", e)) - .try_for_each(move |slot_info| { + ); + + async move { + loop { + let slot_info = match slots.next_slot().await { + Ok(slot) => slot, + Err(err) => { + debug!(target: "slots", "Faulty timer: {:?}", err); + return + }, + }; + // only propose when we are not syncing. if sync_oracle.is_major_syncing() { debug!(target: "slots", "Skipping proposal slot due to sync."); - return Either::Right(future::ready(Ok(()))); + continue; } let slot = slot_info.slot; let chain_head = match client.best_chain() { Ok(x) => x, Err(e) => { - warn!(target: "slots", "Unable to author block in slot {}. \ - no best block header: {:?}", slot, e); - return Either::Right(future::ready(Ok(()))); + warn!( + target: "slots", + "Unable to author block in slot {}. No best block header: {:?}", + slot, + e, + ); + continue; } }; @@ -466,19 +509,11 @@ where slot, err, ); - Either::Right(future::ready(Ok(()))) } else { - Either::Left( - worker.on_slot(chain_head, slot_info) - .then(|_| future::ready(Ok(()))) - ) + worker.on_slot(chain_head, slot_info).await; } - }).then(|res| { - if let Err(err) = res { - warn!(target: "slots", "Slots stream terminated with an error: {:?}", err); - } - future::ready(()) - }) + } + } } /// A header which has been checked @@ -514,10 +549,7 @@ impl Deref for SlotDuration { } impl SlotData for SlotDuration { - /// Get the slot duration in milliseconds. 
- fn slot_duration(&self) -> u64 - where T: SlotData, - { + fn slot_duration(&self) -> std::time::Duration { self.0.slot_duration() } @@ -562,7 +594,7 @@ impl SlotDuration { } }?; - if slot_duration.slot_duration() == 0u64 { + if slot_duration.slot_duration() == Default::default() { return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid(slot_duration)))) } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index b23d676035696..d7ed1eda64c09 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -22,10 +22,9 @@ use super::{SlotCompatible, Slot}; use sp_consensus::Error; -use futures::{prelude::*, task::Context, task::Poll}; use sp_inherents::{InherentData, InherentDataProviders}; -use std::{pin::Pin, time::{Duration, Instant}}; +use std::time::{Duration, Instant}; use futures_timer::Delay; /// Returns current duration since unix epoch. @@ -52,7 +51,7 @@ pub struct SlotInfo { /// The slot number. pub slot: Slot, /// Current timestamp. - pub timestamp: u64, + pub timestamp: sp_timestamp::Timestamp, /// The instant at which the slot ends. pub ends_at: Instant, /// The inherent data. @@ -61,6 +60,26 @@ pub struct SlotInfo { pub duration: Duration, } +impl SlotInfo { + /// Create a new [`SlotInfo`]. + /// + /// `ends_at` is calculated using `timestamp` and `duration`. + pub fn new( + slot: Slot, + timestamp: sp_timestamp::Timestamp, + inherent_data: InherentData, + duration: Duration, + ) -> Self { + Self { + slot, + timestamp, + inherent_data, + duration, + ends_at: Instant::now() + time_until_next(timestamp.as_duration(), duration), + } + } +} + /// A stream that returns every time there is a new slot. pub(crate) struct Slots { last_slot: Slot, @@ -73,13 +92,13 @@ pub(crate) struct Slots { impl Slots { /// Create a new `Slots` stream. 
pub fn new( - slot_duration: u64, + slot_duration: Duration, inherent_data_providers: InherentDataProviders, timestamp_extractor: SC, ) -> Self { Slots { last_slot: 0.into(), - slot_duration: Duration::from_millis(slot_duration), + slot_duration, inner_delay: None, inherent_data_providers, timestamp_extractor, @@ -87,59 +106,49 @@ impl Slots { } } -impl Stream for Slots { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { +impl Slots { + /// Returns a future that fires when the next slot starts. + pub async fn next_slot(&mut self) -> Result { loop { - let slot_duration = self.slot_duration; self.inner_delay = match self.inner_delay.take() { None => { // schedule wait. - let wait_dur = time_until_next(duration_now(), slot_duration); + let wait_dur = time_until_next(duration_now(), self.slot_duration); Some(Delay::new(wait_dur)) } Some(d) => Some(d), }; - if let Some(ref mut inner_delay) = self.inner_delay { - match Future::poll(Pin::new(inner_delay), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(()) => {} - } + if let Some(inner_delay) = self.inner_delay.take() { + inner_delay.await; } - // timeout has fired. let inherent_data = match self.inherent_data_providers.create_inherent_data() { Ok(id) => id, - Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))), + Err(err) => return Err(sp_consensus::Error::InherentData(err)), }; let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data); let (timestamp, slot, offset) = match result { Ok(v) => v, - Err(err) => return Poll::Ready(Some(Err(err))), + Err(err) => return Err(err), }; // reschedule delay for next slot. let ends_in = offset + - time_until_next(Duration::from_millis(timestamp), slot_duration); - let ends_at = Instant::now() + ends_in; + time_until_next(timestamp.as_duration(), self.slot_duration); self.inner_delay = Some(Delay::new(ends_in)); // never yield the same slot twice. 
if slot > self.last_slot { self.last_slot = slot; - break Poll::Ready(Some(Ok(SlotInfo { + break Ok(SlotInfo::new( slot, - duration: self.slot_duration, timestamp, - ends_at, inherent_data, - }))) + self.slot_duration, + )) } } } } - -impl Unpin for Slots {} diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 72c26fead1c1c..e5e52494c2db6 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -35,7 +35,7 @@ sp-trie = { version = "3.0.0", path = "../../primitives/trie" } sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } sp-database = { version = "3.0.0", path = "../../primitives/database" } -parity-db = { version = "0.2.2", optional = true } +parity-db = { version = "0.2.3", optional = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } [dev-dependencies] diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 6233eab3ea396..8051adc1832bc 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -955,7 +955,8 @@ mod tests { let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); let config1 = Some(ChangesTrieConfiguration::new(2, 6)); let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap(); + let just1 = Some((*b"TEST", vec![42])); + backend.finalize_block(BlockId::Number(1), just1).unwrap(); let config2 = Some(ChangesTrieConfiguration::new(2, 7)); let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 6654083939dae..03a6ce2200957 100644 --- a/client/db/src/lib.rs +++ 
b/client/db/src/lib.rs @@ -57,10 +57,11 @@ use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, backend::{NewBlockState, PrunableStateChangesTrieStorage, ProvideChtRoots}, leaves::{LeafSet, FinalizationDisplaced}, cht, + utils::is_descendent_of, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, - well_known_cache_keys, HeaderBackend, + well_known_cache_keys, Backend as _, HeaderBackend, }; use codec::{Decode, Encode}; use hash_db::Prefix; @@ -70,14 +71,14 @@ use sp_core::{Hasher, ChangesTrieConfiguration}; use sp_core::offchain::OffchainOverlayedChange; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; -use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Storage}; +use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Justifications, Storage}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, StorageCollection, ChildStorageCollection, OffchainChangesCollection, - backend::Backend as StateBackend, StateMachineStats, + backend::Backend as StateBackend, StateMachineStats, IndexOperation, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -106,7 +107,16 @@ pub type DbState = sp_state_machine::TrieBackend< const DB_HASH_LEN: usize = 32; /// Hash type that this backend uses for the database. -pub type DbHash = [u8; DB_HASH_LEN]; +pub type DbHash = sp_core::H256; + +/// This is used as block body when storage-chain mode is enabled. +#[derive(Debug, Encode, Decode)] +struct ExtrinsicHeader { + /// Hash of the indexed part + indexed_hash: DbHash, // Zero hash if there's no indexed data + /// The rest of the data. + data: Vec, +} /// A reference tracking state. 
/// @@ -351,7 +361,7 @@ pub(crate) mod columns { pub const KEY_LOOKUP: u32 = 3; pub const HEADER: u32 = 4; pub const BODY: u32 = 5; - pub const JUSTIFICATION: u32 = 6; + pub const JUSTIFICATIONS: u32 = 6; pub const CHANGES_TRIE: u32 = 7; pub const AUX: u32 = 8; /// Offchain workers local storage @@ -363,7 +373,7 @@ pub(crate) mod columns { struct PendingBlock { header: Block::Header, - justification: Option, + justifications: Option, body: Option>, leaf_state: NewBlockState, } @@ -505,42 +515,56 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha impl sc_client_api::blockchain::Backend for BlockchainDb { fn body(&self, id: BlockId) -> ClientResult>> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { - Some(body) => { - match self.transaction_storage { - TransactionStorageMode::BlockBody => match Decode::decode(&mut &body[..]) { - Ok(body) => Ok(Some(body)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body: {}", err) - )), - }, - TransactionStorageMode::StorageChain => { - match Vec::::decode(&mut &body[..]) { - Ok(hashes) => { - let extrinsics: ClientResult> = hashes.into_iter().map( - |h| self.extrinsic(&h).and_then(|maybe_ex| maybe_ex.ok_or_else( - || sp_blockchain::Error::Backend( - format!("Missing transaction: {}", h)))) - ).collect(); - Ok(Some(extrinsics?)) + let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? 
{ + Some(body) => body, + None => return Ok(None), + }; + match self.transaction_storage { + TransactionStorageMode::BlockBody => match Decode::decode(&mut &body[..]) { + Ok(body) => Ok(Some(body)), + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body: {}", err) + )), + }, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(index) => { + let extrinsics: ClientResult> = index.into_iter().map( + | ExtrinsicHeader { indexed_hash, data } | { + let decode_result = if indexed_hash != Default::default() { + match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) { + Some(t) => { + let mut input = utils::join_input(data.as_ref(), t.as_ref()); + Block::Extrinsic::decode(&mut input) + }, + None => return Err(sp_blockchain::Error::Backend( + format!("Missing indexed transaction {:?}", indexed_hash)) + ) + } + } else { + Block::Extrinsic::decode(&mut data.as_ref()) + }; + decode_result.map_err(|err| sp_blockchain::Error::Backend( + format!("Error decoding extrinsic: {}", err)) + ) } - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body list: {}", err) - )), - } + ).collect(); + Ok(Some(extrinsics?)) } + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body list: {}", err) + )), } } - None => Ok(None), } } - fn justification(&self, id: BlockId) -> ClientResult> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? { - Some(justification) => match Decode::decode(&mut &justification[..]) { - Ok(justification) => Ok(Some(justification)), + fn justifications(&self, id: BlockId) -> ClientResult> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, id)? 
{ + Some(justifications) => match Decode::decode(&mut &justifications[..]) { + Ok(justifications) => Ok(Some(justifications)), Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding justification: {}", err) + format!("Error decoding justifications: {}", err) )), } None => Ok(None), @@ -563,21 +587,11 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { - match self.db.get(columns::TRANSACTION, hash.as_ref()) { - Some(ex) => { - match Decode::decode(&mut &ex[..]) { - Ok(ex) => Ok(Some(ex)), - Err(err) => Err(sp_blockchain::Error::Backend( - format!("Error decoding extrinsic {}: {}", hash, err) - )), - } - }, - None => Ok(None), - } + fn indexed_transaction(&self, hash: &Block::Hash) -> ClientResult>> { + Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) } - fn have_extrinsic(&self, hash: &Block::Hash) -> ClientResult { + fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult { Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) } } @@ -680,6 +694,7 @@ pub struct BlockImportOperation { finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, commit_state: bool, + index_ops: Vec, } impl BlockImportOperation { @@ -716,7 +731,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc &mut self, header: Block::Header, body: Option>, - justification: Option, + justifications: Option, leaf_state: NewBlockState, ) -> ClientResult<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); @@ -726,7 +741,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc self.pending_block = Some(PendingBlock { header, body, - justification, + justifications, leaf_state, }); Ok(()) @@ -822,6 +837,11 @@ impl sc_client_api::backend::BlockImportOperation for Bloc self.set_head = Some(block); Ok(()) } + + fn update_transaction_index(&mut self, index_ops: Vec) -> ClientResult<()> { + self.index_ops = index_ops; + Ok(()) + } } struct StorageDb { @@ -1130,9 +1150,9 @@ impl 
Backend { if let Some(justification) = justification { transaction.set_from_vec( - columns::JUSTIFICATION, + columns::JUSTIFICATIONS, &utils::number_and_hash_to_lookup_key(number, hash)?, - justification.encode(), + Justifications::from(justification).encode(), ); } Ok((*hash, number, false, true)) @@ -1154,21 +1174,21 @@ impl Backend { if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { return Ok(()) } - let hash = if new_canonical == number_u64 { hash } else { - ::sc_client_api::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())? - .expect("existence of block with number `new_canonical` \ - implies existence of blocks with all numbers before it; qed") + sc_client_api::blockchain::HeaderBackend::hash( + &self.blockchain, + new_canonical.saturated_into(), + )?.expect("existence of block with number `new_canonical` \ + implies existence of blocks with all numbers before it; qed") }; trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); let commit = self.storage.state_db.canonicalize_block(&hash) .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(transaction, commit); - }; - + } Ok(()) } @@ -1224,25 +1244,19 @@ impl Backend { )?; transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); - if let Some(body) = &pending_block.body { + if let Some(body) = pending_block.body { match self.transaction_storage { TransactionStorageMode::BlockBody => { transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); }, TransactionStorageMode::StorageChain => { - let mut hashes = Vec::with_capacity(body.len()); - for extrinsic in body { - let extrinsic = extrinsic.encode(); - let hash = HashFor::::hash(&extrinsic); - transaction.set(columns::TRANSACTION, &hash.as_ref(), &extrinsic); - hashes.push(hash); - } - transaction.set_from_vec(columns::BODY, &lookup_key, hashes.encode()); + let body = apply_index_ops::(&mut transaction, 
body, operation.index_ops); + transaction.set_from_vec(columns::BODY, &lookup_key, body); }, } } - if let Some(justification) = pending_block.justification { - transaction.set_from_vec(columns::JUSTIFICATION, &lookup_key, justification.encode()); + if let Some(justifications) = pending_block.justifications { + transaction.set_from_vec(columns::JUSTIFICATIONS, &lookup_key, justifications.encode()); } if number.is_zero() { @@ -1409,7 +1423,7 @@ impl Backend { self.storage.db.commit(transaction)?; - // Apply all in-memory state shanges. + // Apply all in-memory state changes. // Code beyond this point can't fail. if let Some(( @@ -1490,8 +1504,8 @@ impl Backend { } } - self.prune_blocks(transaction, f_num)?; let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); + self.prune_blocks(transaction, f_num, &new_displaced)?; match displaced { x @ &mut None => *x = Some(new_displaced), &mut Some(ref mut displaced) => displaced.merge(new_displaced), @@ -1504,47 +1518,83 @@ impl Backend { &self, transaction: &mut Transaction, finalized: NumberFor, + displaced: &FinalizationDisplaced>, ) -> ClientResult<()> { if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { // Always keep the last finalized block let keep = std::cmp::max(keep_blocks, 1); - if finalized < keep.into() { - return Ok(()) + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; } - let number = finalized.saturating_sub(keep.into()); - match read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY, BlockId::::number(number))? 
{ - Some(body) => { - debug!(target: "db", "Removing block #{}", number); - utils::remove_from_db( - transaction, - &*self.storage.db, - columns::KEY_LOOKUP, - columns::BODY, - BlockId::::number(number), - )?; - match self.transaction_storage { - TransactionStorageMode::BlockBody => {}, - TransactionStorageMode::StorageChain => { - match Vec::::decode(&mut &body[..]) { - Ok(hashes) => { - for h in hashes { - transaction.remove(columns::TRANSACTION, h.as_ref()); + + // Also discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = h.clone(); + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deleting as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash.clone()) { + let id = BlockId::::hash(hash.clone()); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = header.parent_hash().clone(); + }, + None => break, + } + } + } + } + Ok(()) + } + + fn prune_block( + &self, + transaction: &mut Transaction, + id: BlockId, + ) -> ClientResult<()> { + match read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => { + debug!(target: "db", "Removing block #{}", id); + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::BODY, + id, + )?; + match self.transaction_storage { + TransactionStorageMode::BlockBody => {}, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(body) => { + for ExtrinsicHeader { indexed_hash, .. 
} in body { + if indexed_hash != Default::default() { + transaction.release( + columns::TRANSACTION, + indexed_hash, + ); } } - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body list: {}", err) - )), } + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body list: {}", err) + )), } } } - None => return Ok(()), } + None => return Ok(()), } Ok(()) } } + fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { for (key, val) in commit.data.inserted.into_iter() { transaction.set_from_vec(columns::STATE, &key[..], val); @@ -1560,6 +1610,67 @@ fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db } } +fn apply_index_ops( + transaction: &mut Transaction, + body: Vec, + ops: Vec, +) -> Vec { + let mut extrinsic_headers: Vec = Vec::with_capacity(body.len()); + let mut index_map = HashMap::new(); + let mut renewed_map = HashMap::new(); + for op in ops { + match op { + IndexOperation::Insert { extrinsic, offset } => { + index_map.insert(extrinsic, offset); + } + IndexOperation::Renew { extrinsic, hash, .. 
} => { + renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref())); + } + } + } + for (index, extrinsic) in body.into_iter().enumerate() { + let extrinsic = extrinsic.encode(); + let extrinsic_header = if let Some(hash) = renewed_map.get(&(index as u32)) { + // Bump ref counter + transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref())); + ExtrinsicHeader { + indexed_hash: hash.clone(), + data: extrinsic, + } + } else { + match index_map.get(&(index as u32)) { + Some(offset) if *offset as usize <= extrinsic.len() => { + let offset = *offset as usize; + let hash = HashFor::::hash(&extrinsic[offset..]); + transaction.store( + columns::TRANSACTION, + DbHash::from_slice(hash.as_ref()), + extrinsic[offset..].to_vec(), + ); + ExtrinsicHeader { + indexed_hash: DbHash::from_slice(hash.as_ref()), + data: extrinsic[..offset].to_vec(), + } + }, + _ => { + ExtrinsicHeader { + indexed_hash: Default::default(), + data: extrinsic, + } + } + } + }; + extrinsic_headers.push(extrinsic_header); + } + debug!( + target: "db", + "DB transaction index: {} inserted, {} renewed", + index_map.len(), + renewed_map.len() + ); + extrinsic_headers.encode() +} + impl sc_client_api::backend::AuxStore for Backend where Block: BlockT { fn insert_aux< 'a, @@ -1608,6 +1719,7 @@ impl sc_client_api::backend::Backend for Backend { finalized_blocks: Vec::new(), set_head: None, commit_state: false, + index_ops: Default::default(), }) } @@ -1668,6 +1780,50 @@ impl sc_client_api::backend::Backend for Backend { Ok(()) } + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> ClientResult<()> { + let mut transaction: Transaction = Transaction::new(); + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = self.blockchain.expect_header(block)?; + let number = *header.number(); + + // Check if the block is finalized first. 
+ let is_descendent_of = is_descendent_of(&self.blockchain, None); + let last_finalized = self.blockchain.last_finalized()?; + + // We can do a quick check first, before doing a proper but more expensive check + if number > self.blockchain.info().finalized_number + || (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + { + return Err(ClientError::NotInFinalizedChain); + } + + let justifications = + if let Some(mut stored_justifications) = self.blockchain.justifications(block)? { + if !stored_justifications.append(justification) { + return Err(ClientError::BadJustification( + "Duplicate consensus engine ID".into() + )); + } + stored_justifications + } else { + Justifications::from(justification) + }; + + transaction.set_from_vec( + columns::JUSTIFICATIONS, + &utils::number_and_hash_to_lookup_key(number, hash)?, + justifications.encode(), + ); + + self.storage.db.commit(transaction)?; + + Ok(()) + } + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { Some(&self.changes_tries_storage) } @@ -1918,12 +2074,16 @@ pub(crate) mod tests { use sp_core::H256; use sc_client_api::backend::{Backend as BTrait, BlockImportOperation as Op}; use sc_client_api::blockchain::Backend as BLBTrait; + use sp_runtime::ConsensusEngineId; use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_runtime::generic::DigestItem; use sp_state_machine::{TrieMut, TrieDBMut}; use sp_blockchain::{lowest_common_ancestor, tree_route}; + const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; + const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; + pub(crate) type Block = RawBlock>; pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { @@ -1949,7 +2109,7 @@ pub(crate) mod tests { changes: Option, Vec)>>, extrinsics_root: H256, ) -> H256 { - insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new()) + insert_block(backend, number, parent_hash, changes, 
extrinsics_root, Vec::new(), None) } pub fn insert_block( @@ -1959,6 +2119,7 @@ pub(crate) mod tests { changes: Option, Vec)>>, extrinsics_root: H256, body: Vec>, + transaction_index: Option>, ) -> H256 { use sp_runtime::testing::Digest; @@ -1986,6 +2147,9 @@ pub(crate) mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); op.set_block_data(header, Some(body), None, NewBlockState::Best).unwrap(); + if let Some(index) = transaction_index { + op.update_transaction_index(index).unwrap(); + } op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); backend.commit_operation(op).unwrap(); @@ -2511,12 +2675,47 @@ pub(crate) mod tests { let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); let _ = insert_header(&backend, 1, block0, None, Default::default()); - let justification = Some(vec![1, 2, 3]); + let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justification(BlockId::Number(1)).unwrap(), - justification, + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + justification.map(Justifications::from), + ); + } + + #[test] + fn test_append_justification_to_finalized_block() { + use sc_client_api::blockchain::{Backend as BlockChainBackend}; + + let backend = Backend::::new_test(10, 10); + + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); + + let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); + backend.finalize_block( + BlockId::Number(1), + Some(just0.clone().into()), + ).unwrap(); + + let just1 = (CONS1_ENGINE_ID, vec![4, 5]); + backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); + + let just2 = (CONS1_ENGINE_ID, vec![6, 7]); + assert!(matches!( + 
backend.append_justification(BlockId::Number(1), just2), + Err(ClientError::BadJustification(_)) + )); + + let justifications = { + let mut just = Justifications::from(just0); + just.append(just1); + just + }; + assert_eq!( + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + Some(justifications), ); } @@ -2592,7 +2791,7 @@ pub(crate) mod tests { let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0 .. 5 { - let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()]); + let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()], None); blocks.push(hash); prev_hash = hash; } @@ -2613,4 +2812,100 @@ pub(crate) mod tests { assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } } + + #[test] + fn prune_blocks_on_finalize_with_fork() { + let backend = Backend::::new_test_with_tx_storage( + 2, + 10, + TransactionStorageMode::StorageChain + ); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0 .. 5 { + let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()], None); + blocks.push(hash); + prev_hash = hash; + } + + // insert a fork at block 2 + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![2.into()], + None + ); + insert_block(&backend, 3, fork_hash_root, None, H256::random(), vec![3.into(), 11.into()], None); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + backend.commit_operation(op).unwrap(); + + for i in 1 .. 
5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let bc = backend.blockchain(); + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + + #[test] + fn renew_transaction_storage() { + let backend = Backend::::new_test_with_tx_storage( + 2, + 10, + TransactionStorageMode::StorageChain + ); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); + for i in 0 .. 10 { + let mut index = Vec::new(); + if i == 0 { + index.push(IndexOperation::Insert { extrinsic: 0, offset: 1 }); + } else if i < 5 { + // keep renewing 1st + index.push(IndexOperation::Renew { + extrinsic: 0, + hash: x1_hash.as_ref().to_vec(), + size: (x1.len() - 1) as u32, + }); + } // else stop renewing + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + Some(index) + ); + blocks.push(hash); + prev_hash = hash; + } + + for i in 1 .. 
10 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + let bc = backend.blockchain(); + if i < 6 { + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_some()); + } else { + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_none()); + } + } + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 91f37dd374d9f..bf24197c5b5d9 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -756,7 +756,7 @@ pub(crate) mod tests { #[test] fn finalized_ancient_headers_are_replaced_with_cht() { fn insert_headers Header>(header_producer: F) -> - (Arc>, LightStorage) + (Arc, LightStorage) { let raw_db = Arc::new(sp_database::MemDb::default()); let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 71cc5117f19ee..ed39c1e9f669f 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -33,7 +33,7 @@ fn handle_err(result: parity_db::Result) -> T { } /// Wrap parity-db database into a trait object that implements `sp_database::Database` -pub fn open(path: &std::path::Path, db_type: DatabaseType) +pub fn open>(path: &std::path::Path, db_type: DatabaseType) -> parity_db::Result>> { let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); @@ -48,7 +48,7 @@ pub fn open(path: &std::path::Path, db_type: DatabaseType) Ok(std::sync::Arc::new(DbAdapter(db))) } -impl Database for DbAdapter { +impl> Database for DbAdapter { fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { handle_err(self.0.commit(transaction.0.into_iter().map(|change| match change { @@ -65,7 +65,11 @@ impl Database for DbAdapter { handle_err(self.0.get(col as u8, key)) } - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); + fn contains(&self, col: ColumnId, key: 
&[u8]) -> bool { + handle_err(self.0.get_size(col as u8, key)).is_some() + } + + fn value_size(&self, col: ColumnId, key: &[u8]) -> Option { + handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize) } } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index b6e49edba1978..ea91b8253e1d8 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -23,17 +23,19 @@ use std::io::{Read, Write, ErrorKind}; use std::path::{Path, PathBuf}; use sp_runtime::traits::Block as BlockT; -use crate::utils::DatabaseType; +use crate::{columns, utils::DatabaseType}; use kvdb_rocksdb::{Database, DatabaseConfig}; +use codec::{Decode, Encode}; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; /// Current db version. -const CURRENT_VERSION: u32 = 2; +const CURRENT_VERSION: u32 = 3; /// Number of columns in v1. const V1_NUM_COLUMNS: u32 = 11; +const V2_NUM_COLUMNS: u32 = 12; /// Upgrade database to current version. pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { @@ -42,7 +44,11 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_bl let db_version = current_version(db_path)?; match db_version { 0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?, - 1 => migrate_1_to_2::(db_path, db_type)?, + 1 => { + migrate_1_to_2::(db_path, db_type)?; + migrate_2_to_3::(db_path, db_type)? + }, + 2 => migrate_2_to_3::(db_path, db_type)?, CURRENT_VERSION => (), _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, } @@ -62,6 +68,36 @@ fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> sp_b db.add_column().map_err(db_err) } +/// Migration from version2 to version3: +/// - The format of the stored Justification changed to support multiple Justifications. 
+fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { + let db_path = db_path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + + // Get all the keys we need to update + let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect(); + + // Read and update each entry + let mut transaction = db.transaction(); + for key in keys { + if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key).map_err(db_err)? { + // Tag each justification with the hardcoded ID for GRANDPA to avoid the dependency on + // the GRANDPA crate. + // NOTE: when storing justifications the previous API would get a `Vec` and still + // call encode on it. + let justification = Vec::::decode(&mut &justification[..]) + .map_err(|_| sp_blockchain::Error::Backend("Invalid justification blob".into()))?; + let justifications = sp_runtime::Justifications::from((*b"FRNK", justification)); + transaction.put_vec(columns::JUSTIFICATIONS, &key, justifications.encode()); + } + } + db.write(transaction).map_err(db_err)?; + + Ok(()) +} + /// Reads current database version from the file at given path. /// If the file does not exist returns 0. 
fn current_version(path: &Path) -> sp_blockchain::Result { @@ -141,8 +177,8 @@ mod tests { } #[test] - fn upgrade_from_1_to_2_works() { - for version_from_file in &[None, Some(1)] { + fn upgrade_to_3_works() { + for version_from_file in &[None, Some(1), Some(2)] { let db_dir = tempfile::TempDir::new().unwrap(); let db_path = db_dir.path(); create_db(db_path, *version_from_file); diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index cd9b2a6f56d41..590b994d50e87 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -278,7 +278,7 @@ pub fn open_database( #[cfg(feature = "with-parity-db")] DatabaseSettingsSrc::ParityDb { path } => { crate::parity_db::open(&path, db_type) - .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? + .map_err(|e| sp_blockchain::Error::Backend(format!("{}", e)))? }, #[cfg(not(feature = "with-parity-db"))] DatabaseSettingsSrc::ParityDb { .. } => { @@ -449,10 +449,35 @@ impl DatabaseType { } } +pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]); + +pub(crate) fn join_input<'a, 'b>(i1: &'a[u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { + JoinInput(i1, i2) +} + +impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(Some(self.0.len() + self.1.len())) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + let mut read = 0; + if self.0.len() > 0 { + read = std::cmp::min(self.0.len(), into.len()); + self.0.read(&mut into[..read])?; + } + if read < into.len() { + self.1.read(&mut into[read..])?; + } + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use codec::Input; type Block = RawBlock>; #[test] @@ -469,4 +494,25 @@ mod tests { assert_eq!(DatabaseType::Full.as_str(), "full"); assert_eq!(DatabaseType::Light.as_str(), "light"); } + + #[test] + fn join_input_works() { + let buf1 = [1, 2, 3, 4]; + let buf2 = [5, 6, 7, 8]; + let mut test = [0, 0, 0]; + let mut joined 
= join_input(buf1.as_ref(), buf2.as_ref()); + assert_eq!(joined.remaining_len().unwrap(), Some(8)); + + joined.read(&mut test).unwrap(); + assert_eq!(test, [1, 2, 3]); + assert_eq!(joined.remaining_len().unwrap(), Some(5)); + + joined.read(&mut test).unwrap(); + assert_eq!(test, [4, 5, 6]); + assert_eq!(joined.remaining_len().unwrap(), Some(2)); + + joined.read(&mut test[0..2]).unwrap(); + assert_eq!(test, [7, 8, 6]); + assert_eq!(joined.remaining_len().unwrap(), Some(0)); + } } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 42a7950593ccf..6df651e1b776c 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -274,10 +274,20 @@ impl NativeExecutor { default_heap_pages: Option, max_runtime_instances: usize, ) -> Self { - let mut host_functions = D::ExtendHostFunctions::host_functions(); + let extended = D::ExtendHostFunctions::host_functions(); + let mut host_functions = sp_io::SubstrateHostFunctions::host_functions() + .into_iter() + // filter out any host function overrides provided. + .filter(|host_fn| { + extended.iter() + .find(|ext_host_fn| host_fn.name() == ext_host_fn.name()) + .is_none() + }) + .collect::>(); + // Add the custom host functions provided by the user. 
- host_functions.extend(sp_io::SubstrateHostFunctions::host_functions()); + host_functions.extend(extended); let wasm_executor = WasmExecutor::new( fallback_method, default_heap_pages, diff --git a/client/finality-grandpa-warp-sync/src/proof.rs b/client/finality-grandpa-warp-sync/src/proof.rs index 1b447d2ef720c..4677d2401e835 100644 --- a/client/finality-grandpa-warp-sync/src/proof.rs +++ b/client/finality-grandpa-warp-sync/src/proof.rs @@ -20,7 +20,7 @@ use sc_finality_grandpa::{ find_scheduled_change, AuthoritySetChanges, BlockNumberOps, GrandpaJustification, }; use sp_blockchain::Backend as BlockchainBackend; -use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_finality_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, @@ -108,11 +108,14 @@ impl WarpSyncProof { break; } - let justification = backend.justification(BlockId::Number(*last_block))?.expect( - "header is last in set and contains standard change signal; \ - must have justification; \ - qed.", - ); + let justification = backend + .justifications(BlockId::Number(*last_block))? 
+ .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) + .expect( + "header is last in set and contains standard change signal; \ + must have justification; \ + qed.", + ); let justification = GrandpaJustification::::decode(&mut &justification[..])?; @@ -171,6 +174,7 @@ mod tests { use sc_finality_grandpa::{AuthoritySetChanges, GrandpaJustification}; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; + use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_keyring::Ed25519Keyring; use sp_runtime::{generic::BlockId, traits::Header as _}; use std::sync::Arc; @@ -233,7 +237,7 @@ mod tests { block.header.digest_mut().logs.push(digest); } - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); if let Some(new_authorities) = new_authorities { // generate a justification for this block, finalize it and note the authority set @@ -272,7 +276,10 @@ mod tests { let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client - .finalize_block(BlockId::Hash(target_hash), Some(justification.encode())) + .finalize_block( + BlockId::Hash(target_hash), + Some((GRANDPA_ENGINE_ID, justification.encode())) + ) .unwrap(); authority_set_changes.push((current_set_id, n)); diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7ae5666c7bc84..1f21f454491b3 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -47,6 +47,7 @@ sc-block-builder = { version = "0.9.0", path = "../block-builder" } finality-grandpa = { version = "0.14.0", features = ["derive-codec"] } pin-project = "1.0.4" linked-hash-map = "0.5.2" +async-trait = "0.1.42" [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 1854a33d29f1f..056460ac9ed80 100644 --- a/client/finality-grandpa/src/authorities.rs +++ 
b/client/finality-grandpa/src/authorities.rs @@ -19,17 +19,17 @@ //! Utilities for dealing with authorities, authority sets, and handoffs. use fork_tree::ForkTree; -use parking_lot::RwLock; +use parking_lot::MappedMutexGuard; use finality_grandpa::voter_set::VoterSet; use parity_scale_codec::{Encode, Decode}; use log::debug; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; +use sc_consensus::shared_data::{SharedData, SharedDataLocked}; use std::cmp::Ord; use std::fmt::Debug; use std::ops::Add; -use std::sync::Arc; /// Error type returned on operations on the `AuthoritySet`. #[derive(Debug, derive_more::Display)] @@ -70,19 +70,30 @@ impl From for Error { /// A shared authority set. pub struct SharedAuthoritySet { - inner: Arc>>, + inner: SharedData>, } impl Clone for SharedAuthoritySet { fn clone(&self) -> Self { - SharedAuthoritySet { inner: self.inner.clone() } + SharedAuthoritySet { + inner: self.inner.clone(), + } } } impl SharedAuthoritySet { - /// Acquire a reference to the inner read-write lock. - pub(crate) fn inner(&self) -> &RwLock> { - &*self.inner + /// Returns access to the [`AuthoritySet`]. + pub(crate) fn inner(&self) -> MappedMutexGuard> { + self.inner.shared_data() + } + + /// Returns access to the [`AuthoritySet`] and locks it. + /// + /// For more information see [`SharedDataLocked`]. + pub(crate) fn inner_locked( + &self, + ) -> SharedDataLocked> { + self.inner.shared_data_locked() } } @@ -93,17 +104,17 @@ where N: Add + Ord + Clone + Debug, /// Get the earliest limit-block number that's higher or equal to the given /// min number, if any. pub(crate) fn current_limit(&self, min: N) -> Option { - self.inner.read().current_limit(min) + self.inner().current_limit(min) } /// Get the current set ID. This is incremented every time the set changes. 
pub fn set_id(&self) -> u64 { - self.inner.read().set_id + self.inner().set_id } /// Get the current authorities and their weights (for the current set ID). pub fn current_authorities(&self) -> VoterSet { - VoterSet::new(self.inner.read().current_authorities.iter().cloned()).expect( + VoterSet::new(self.inner().current_authorities.iter().cloned()).expect( "current_authorities is non-empty and weights are non-zero; \ constructor and all mutating operations on `AuthoritySet` ensure this; \ qed.", @@ -112,18 +123,20 @@ where N: Add + Ord + Clone + Debug, /// Clone the inner `AuthoritySet`. pub fn clone_inner(&self) -> AuthoritySet { - self.inner.read().clone() + self.inner().clone() } /// Clone the inner `AuthoritySetChanges`. pub fn authority_set_changes(&self) -> AuthoritySetChanges { - self.inner.read().authority_set_changes.clone() + self.inner().authority_set_changes.clone() } } impl From> for SharedAuthoritySet { fn from(set: AuthoritySet) -> Self { - SharedAuthoritySet { inner: Arc::new(RwLock::new(set)) } + SharedAuthoritySet { + inner: SharedData::new(set), + } } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 43c45b9f10ae1..8ecfae40f68c7 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -592,7 +592,7 @@ mod test { ).unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, @@ -616,7 +616,7 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, @@ -688,7 +688,7 @@ mod test { ).unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, @@ -712,7 +712,7 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, @@ -781,7 +781,7 @@ mod test { ).unwrap(); assert_eq!( - 
*authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 5bb525549b18c..3786355d2db4e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -55,7 +55,7 @@ use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; use crate::voting_rule::VotingRule; use sp_finality_grandpa::{ - AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, + AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GRANDPA_ENGINE_ID, GrandpaApi, RoundNumber, SetId, }; use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; @@ -508,7 +508,7 @@ where .best_chain() .map_err(|e| Error::Blockchain(e.to_string()))?; - let authority_set = self.authority_set.inner().read(); + let authority_set = self.authority_set.inner(); // block hash and number of the next pending authority set change in the // given best chain. @@ -1228,7 +1228,7 @@ where // NOTE: lock must be held through writing to DB to avoid race. this lock // also implicitly synchronizes the check for last finalized number // below. - let mut authority_set = authority_set.inner().write(); + let mut authority_set = authority_set.inner(); let status = client.info(); @@ -1326,10 +1326,13 @@ where // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. 
- client.apply_finality(import_op, BlockId::Hash(hash), justification, true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - e - })?; + let justification = justification.map(|j| (GRANDPA_ENGINE_ID, j.clone())); + client + .apply_finality(import_op, BlockId::Hash(hash), justification, true) + .map_err(|e| { + warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); + e + })?; telemetry!( telemetry; CONSENSUS_INFO; diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index b79b3190739d6..80ba8cee9101e 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -43,11 +43,11 @@ use finality_grandpa::BlockNumberOps; use parity_scale_codec::{Encode, Decode}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sp_runtime::{ - Justification, generic::BlockId, + EncodedJustification, generic::BlockId, traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, }; use sc_client_api::backend::Backend; -use sp_finality_grandpa::AuthorityId; +use sp_finality_grandpa::{AuthorityId, GRANDPA_ENGINE_ID}; use crate::authorities::AuthoritySetChanges; use crate::justification::GrandpaJustification; @@ -190,8 +190,10 @@ where // Get the Justification stored at the last block of the set let last_block_for_set_id = BlockId::Number(last_block_for_set); let justification = - if let Some(justification) = blockchain.justification(last_block_for_set_id)? { - justification + if let Some(grandpa_justification) = blockchain.justifications(last_block_for_set_id)? + .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) + { + grandpa_justification } else { trace!( target: "afg", @@ -257,7 +259,7 @@ pub trait ProvableJustification: Encode + Decode { /// Decode and verify justification. 
fn decode_and_verify( - justification: &Justification, + justification: &EncodedJustification, set_id: u64, authorities: &[(AuthorityId, u64)], ) -> ClientResult { @@ -286,6 +288,7 @@ pub(crate) mod tests { use super::*; use crate::authorities::AuthoritySetChanges; use sp_core::crypto::Public; + use sp_runtime::Justifications; use sp_finality_grandpa::AuthorityList; use sc_client_api::NewBlockState; use sc_client_api::in_mem::Blockchain as InMemoryBlockchain; @@ -330,31 +333,27 @@ pub(crate) mod tests { } fn test_blockchain() -> InMemoryBlockchain { + use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; let blockchain = InMemoryBlockchain::::new(); - blockchain - .insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(2).hash(), header(2), None, None, NewBlockState::Best) - .unwrap(); - blockchain - .insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final) - .unwrap(); + let just0 = Some(Justifications::from((ID, vec![0]))); + let just1 = Some(Justifications::from((ID, vec![1]))); + let just2 = None; + let just3 = Some(Justifications::from((ID, vec![3]))); + blockchain.insert(header(0).hash(), header(0), just0, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(1).hash(), header(1), just1, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(2).hash(), header(2), just2, None, NewBlockState::Best).unwrap(); + blockchain.insert(header(3).hash(), header(3), just3, None, NewBlockState::Final).unwrap(); blockchain } #[test] fn finality_proof_fails_if_no_more_last_finalized_blocks() { + use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; let blockchain = test_blockchain(); - blockchain - .insert(header(4).hash(), header(4), Some(vec![1]), None, NewBlockState::Best) - .unwrap(); - blockchain - .insert(header(5).hash(), header(5), Some(vec![2]), None, 
NewBlockState::Best) - .unwrap(); + let just1 = Some(Justifications::from((ID, vec![1]))); + let just2 = Some(Justifications::from((ID, vec![2]))); + blockchain.insert(header(4).hash(), header(4), just1, None, NewBlockState::Best).unwrap(); + blockchain.insert(header(5).hash(), header(5), just2, None, NewBlockState::Best).unwrap(); let mut authority_set_changes = AuthoritySetChanges::empty(); authority_set_changes.append(0, 5); @@ -430,22 +429,17 @@ pub(crate) mod tests { #[test] fn finality_proof_using_authority_set_changes_fails_with_undefined_start() { + use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; let blockchain = test_blockchain(); let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let just4 = TestJustification((0, auth.clone()), vec![4]).encode(); - let just7 = TestJustification((1, auth.clone()), vec![7]).encode(); - blockchain - .insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(5).hash(), header(5), None, None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(6).hash(), header(6), None, None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final) - .unwrap(); + let grandpa_just4 = TestJustification((0, auth.clone()), vec![4]).encode(); + let grandpa_just7 = TestJustification((1, auth.clone()), vec![7]).encode(); + let just4 = Some(Justifications::from((ID, grandpa_just4))); + let just7 = Some(Justifications::from((ID, grandpa_just7))); + blockchain.insert(header(4).hash(), header(4), just4, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(7).hash(), header(7), just7, None, NewBlockState::Final).unwrap(); // We have stored the correct block number for the relevant set, but as 
we are missing the // block for the preceding set the start is not well-defined. @@ -462,22 +456,17 @@ pub(crate) mod tests { #[test] fn finality_proof_using_authority_set_changes_works() { + use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; let blockchain = test_blockchain(); let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let just4 = TestJustification((0, auth.clone()), vec![4]).encode(); - let just7 = TestJustification((1, auth.clone()), vec![7]).encode(); - blockchain - .insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(5).hash(), header(5), None, None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(6).hash(), header(6), None, None, NewBlockState::Final) - .unwrap(); - blockchain - .insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final) - .unwrap(); + let grandpa_just4 = TestJustification((0, auth.clone()), vec![4]).encode(); + let grandpa_just7 = TestJustification((1, auth.clone()), vec![7]).encode(); + let just4 = Some(Justifications::from((ID, grandpa_just4))); + let just7 = Some(Justifications::from((ID, grandpa_just7.clone()))); + blockchain.insert(header(4).hash(), header(4), just4, None, NewBlockState::Final) .unwrap(); + blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final) .unwrap(); + blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(7).hash(), header(7), just7, None, NewBlockState::Final).unwrap(); let mut authority_set_changes = AuthoritySetChanges::empty(); authority_set_changes.append(0, 4); @@ -497,7 +486,7 @@ pub(crate) mod tests { proof_of_5, FinalityProof { block: header(7).hash(), - justification: just7, + justification: grandpa_just7, unknown_headers: vec![header(6)], } ); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 22d7b7fd5bcc8..b2fcca019bcb1 100644 --- 
a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -20,13 +20,13 @@ use std::{sync::Arc, collections::HashMap}; use log::debug; use parity_scale_codec::Encode; -use parking_lot::RwLockWriteGuard; use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_telemetry::TelemetryHandle; use sp_utils::mpsc::TracingUnboundedSender; use sp_api::TransactionFor; +use sc_consensus::shared_data::{SharedDataLockedUpgradable, SharedDataLocked}; use sp_consensus::{ BlockImport, Error as ConsensusError, @@ -99,7 +99,7 @@ impl JustificationImport let chain_info = self.inner.info(); // request justifications for all pending changes for which change blocks have already been imported - let authorities = self.authority_set.inner().read(); + let authorities = self.authority_set.inner(); for pending_change in authorities.pending_changes() { if pending_change.delay_kind == DelayKind::Finalized && pending_change.effective_number() > chain_info.finalized_number && @@ -157,30 +157,30 @@ impl AppliedChanges { } } -struct PendingSetChanges<'a, Block: 'a + BlockT> { +struct PendingSetChanges { just_in_case: Option<( AuthoritySet>, - RwLockWriteGuard<'a, AuthoritySet>>, + SharedDataLockedUpgradable>>, )>, applied_changes: AppliedChanges>, do_pause: bool, } -impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { +impl PendingSetChanges { // revert the pending set change explicitly. 
- fn revert(self) { } + fn revert(self) {} fn defuse(mut self) -> (AppliedChanges>, bool) { self.just_in_case = None; - let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); (applied_changes, self.do_pause) } } -impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { +impl Drop for PendingSetChanges { fn drop(&mut self) { if let Some((old_set, mut authorities)) = self.just_in_case.take() { - *authorities = old_set; + *authorities.upgrade() = old_set; } } } @@ -269,33 +269,38 @@ where // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. - struct InnerGuard<'a, T: 'a> { - old: Option, - guard: Option>, + struct InnerGuard<'a, H, N> { + old: Option>, + guard: Option>>, } - impl<'a, T: 'a> InnerGuard<'a, T> { - fn as_mut(&mut self) -> &mut T { + impl<'a, H, N> InnerGuard<'a, H, N> { + fn as_mut(&mut self) -> &mut AuthoritySet { &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") } - fn set_old(&mut self, old: T) { + fn set_old(&mut self, old: AuthoritySet) { if self.old.is_none() { // ignore "newer" old changes. 
self.old = Some(old); } } - fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { + fn consume( + mut self, + ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { if let Some(old) = self.old.take() { - Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) + Some(( + old, + self.guard.take().expect("only taken on deconstruction; qed"), + )) } else { None } } } - impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + impl<'a, H, N> Drop for InnerGuard<'a, H, N> { fn drop(&mut self) { if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { *guard = old; @@ -315,7 +320,7 @@ where let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); let mut guard = InnerGuard { - guard: Some(self.authority_set.inner().write()), + guard: Some(self.authority_set.inner_locked()), old: None, }; @@ -413,10 +418,13 @@ where ); } + let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) } } +#[async_trait::async_trait] impl BlockImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, @@ -425,11 +433,13 @@ impl BlockImport Client: crate::ClientForGrandpa, for<'a> &'a Client: BlockImport>, + TransactionFor: Send + 'static, + SC: Send, { type Error = ConsensusError; type Transaction = TransactionFor; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -451,8 +461,8 @@ impl BlockImport let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; // we don't want to finalize on `inner.import_block` - let mut justification = block.justification.take(); - let import_result = (&*self.inner).import_block(block, new_cache); + let mut justifications = block.justifications.take(); + let import_result = (&*self.inner).import_block(block, new_cache).await; let mut imported_aux = { match import_result { @@ -513,17 +523,20 @@ impl 
BlockImport // need to apply first, drop any justification that might have been provided with // the block to make sure we request them from `sync` which will ensure they'll be // applied in-order. - justification.take(); + justifications.take(); }, _ => {}, } - match justification { + let grandpa_justification = justifications + .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); + + match grandpa_justification { Some(justification) => { let import_res = self.import_justification( hash, number, - justification, + (GRANDPA_ENGINE_ID, justification), needs_justification, initial_sync, ); @@ -553,11 +566,11 @@ impl BlockImport Ok(ImportResult::Imported(imported_aux)) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block) + self.inner.check_block(block).await } } @@ -577,8 +590,7 @@ impl GrandpaBlockImport GrandpaBlockImport Result<(), ConsensusError> { + if justification.0 != GRANDPA_ENGINE_ID { + return Err(ConsensusError::ClientImport( + "GRANDPA can only import GRANDPA Justifications.".into(), + )); + } + let justification = GrandpaJustification::decode_and_verify_finalizes( - &justification, + &justification.1, (hash, number), self.authority_set.set_id(), &self.authority_set.current_authorities(), diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index c6c2a39674b8c..9a8939660473b 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1019,7 +1019,7 @@ where // set changed (not where the signal happened!) as the base. 
let set_state = VoterSetState::live( new.set_id, - &*self.env.authority_set.inner().read(), + &*self.env.authority_set.inner(), (new.canon_hash, new.canon_number), ); diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index c0eab15e4f455..827a7388d6033 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -326,7 +326,7 @@ where // set changed (not where the signal happened!) as the base. let set_state = VoterSetState::live( new.set_id, - &*self.persistent_data.authority_set.inner().read(), + &*self.persistent_data.authority_set.inner(), (new.canon_hash, new.canon_number), ); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 6824a8ed04273..b87bbefc11375 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -28,9 +28,9 @@ use sc_network_test::{ use sc_network::config::ProtocolConfig; use parking_lot::{RwLock, Mutex}; use futures_timer::Delay; +use futures::executor::block_on; use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; -use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; @@ -39,11 +39,13 @@ use sp_consensus::{ import_queue::BoxJustificationImport, }; use std::{collections::{HashMap, HashSet}, pin::Pin}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::{Justifications, traits::{Block as BlockT, Header as HeaderT}}; use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::H256; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; +use sp_finality_grandpa::{ + GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, +}; use authorities::AuthoritySet; use 
sc_block_builder::BlockBuilderProvider; @@ -54,7 +56,13 @@ use sp_application_crypto::key_types::GRANDPA; type TestLinkHalf = LinkHalf>; type PeerData = Mutex>; -type GrandpaPeer = Peer; +type GrandpaPeer = Peer; +type GrandpaBlockImport = crate::GrandpaBlockImport< + substrate_test_runtime_client::Backend, + Block, + PeersFullClient, + LongestChain +>; struct GrandpaTestNet { peers: Vec, @@ -62,21 +70,38 @@ struct GrandpaTestNet { } impl GrandpaTestNet { - fn new(test_config: TestApi, n_peers: usize) -> Self { + fn new(test_config: TestApi, n_authority: usize, n_full: usize) -> Self { let mut net = GrandpaTestNet { - peers: Vec::with_capacity(n_peers), + peers: Vec::with_capacity(n_authority + n_full), test_config, }; - for _ in 0..n_peers { + + for _ in 0..n_authority { + net.add_authority_peer(); + } + + for _ in 0..n_full { net.add_full_peer(); } + net } } +impl GrandpaTestNet { + fn add_authority_peer(&mut self) { + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], + is_authority: true, + ..Default::default() + }) + } +} + impl TestNetFactory for GrandpaTestNet { type Verifier = PassThroughVerifier; type PeerData = PeerData; + type BlockImport = GrandpaBlockImport; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -94,6 +119,7 @@ impl TestNetFactory for GrandpaTestNet { fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], + is_authority: false, ..Default::default() }) } @@ -107,9 +133,9 @@ impl TestNetFactory for GrandpaTestNet { PassThroughVerifier::new(false) // use non-instant finality. 
} - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, PeerData, ) @@ -124,7 +150,7 @@ impl TestNetFactory for GrandpaTestNet { ).expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); ( - BlockImportAdapter::new_full(import), + BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link)), ) @@ -354,7 +380,7 @@ fn finalize_3_voters_no_observers() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -369,7 +395,7 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justification(&BlockId::Number(20)).unwrap().is_none(), + net.lock().peer(0).client().justifications(&BlockId::Number(20)).unwrap().is_none(), "Extra justification for block#1", ); } @@ -381,7 +407,7 @@ fn finalize_3_voters_1_full_observer() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); runtime.spawn(initialize_grandpa(&mut net, peers)); runtime.spawn({ @@ -464,7 +490,7 @@ fn transition_3_voters_twice_1_full_observer() { let genesis_voters = make_ids(peers_a); let api = TestApi::new(genesis_voters); - let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); + let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8, 1))); let mut runtime = Runtime::new().unwrap(); @@ -602,7 +628,7 @@ fn justification_is_generated_periodically() { let peers = 
&[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(32, false); net.block_until_sync(); @@ -613,7 +639,7 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(32)).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(&BlockId::Number(32)).unwrap().is_some()); } } @@ -626,7 +652,7 @@ fn sync_justifications_on_change_blocks() { // 4 peers, 3 of them are authorities and participate in grandpa let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api, 4); + let mut net = GrandpaTestNet::new(api, 3, 1); let voters = initialize_grandpa(&mut net, peers_a); // add 20 blocks @@ -658,12 +684,12 @@ fn sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(21)).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(&BlockId::Number(21)).unwrap().is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() { + if net.lock().peer(3).client().justifications(&BlockId::Number(21)).unwrap().is_none() { net.lock().poll(cx); Poll::Pending } else { @@ -688,8 +714,10 @@ fn finalizes_multiple_pending_changes_in_order() { let genesis_voters = make_ids(peers_a); // 6 peers, 3 of them are authorities and participate in grandpa from genesis + // but all of 
them will be part of the voter set eventually so they should be + // all added to the network as authorities let api = TestApi::new(genesis_voters); - let mut net = GrandpaTestNet::new(api, 6); + let mut net = GrandpaTestNet::new(api, 6, 0); runtime.spawn(initialize_grandpa(&mut net, all_peers)); // add 20 blocks @@ -749,7 +777,7 @@ fn force_change_to_new_set() { let api = TestApi::new(make_ids(genesis_authorities)); let voters = make_ids(peers_a); - let mut net = GrandpaTestNet::new(api, 3); + let mut net = GrandpaTestNet::new(api, 3, 0); let voters_future = initialize_grandpa(&mut net, peers_a); let net = Arc::new(Mutex::new(net)); @@ -798,14 +826,10 @@ fn allows_reimporting_change_blocks() { let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().unwrap(); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -825,7 +849,7 @@ fn allows_reimporting_change_blocks() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, @@ -836,7 +860,7 @@ fn allows_reimporting_change_blocks() { ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } @@ -847,14 +871,10 @@ fn test_bad_justification() { let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -868,7 +888,7 @@ fn test_bad_justification() { let block = || { let block = block.clone(); let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(Vec::new()); + import.justifications = Some(Justifications::from((GRANDPA_ENGINE_ID, Vec::new()))); import.body = Some(block.extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -876,7 +896,7 @@ fn test_bad_justification() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, @@ -887,7 +907,7 @@ fn test_bad_justification() { ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } @@ -907,7 +927,7 @@ fn voter_persists_its_votes() { let voters = make_ids(peers); // alice has a chain with 20 blocks - let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); + let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2, 0); // create the communication layer for bob, but don't start any // voter. instead we'll listen for the prevote that alice casts @@ -931,9 +951,7 @@ fn voter_persists_its_votes() { let set_state = { let bob_client = net.peer(1).client().clone(); let (_, _, link) = net - .make_block_import::< - TransactionFor - >(bob_client); + .make_block_import(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. 
} = persistent_data; set_state @@ -992,7 +1010,7 @@ fn voter_persists_its_votes() { // we add a new peer to the test network and we'll use // the network service of this new peer - net.add_full_peer(); + net.add_authority_peer(); let net_service = net.peers[2].network_service().clone(); // but we'll reuse the client from the first peer (alice_voter1) // since we want to share the same database, so that we can @@ -1000,9 +1018,7 @@ fn voter_persists_its_votes() { let alice_client = net.peer(0).client().clone(); let (_block_import, _, link) = net - .make_block_import::< - TransactionFor - >(alice_client); + .make_block_import(alice_client); let link = link.lock().take().unwrap(); let grandpa_params = GrandpaParams { @@ -1163,7 +1179,7 @@ fn finalize_3_voters_1_light_observer() { let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); let voters = initialize_grandpa(&mut net, authorities); let observer = observer::run_grandpa_observer( Config { @@ -1201,7 +1217,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); - let net = GrandpaTestNet::new(TestApi::new(voters), 2); + let net = GrandpaTestNet::new(TestApi::new(voters), 2, 0); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); @@ -1269,7 +1285,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let runtime = runtime.handle().clone(); wait_for_finality.then(move |_| { - net.lock().add_full_peer(); + net.lock().add_authority_peer(); let link = { let net = net.lock(); @@ -1373,7 +1389,7 @@ fn grandpa_environment_respects_voting_rules() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = 
GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); @@ -1403,7 +1419,7 @@ fn grandpa_environment_respects_voting_rules() { // the unrestricted environment should just return the best block assert_eq!( - futures::executor::block_on(unrestricted_env.best_chain_containing( + block_on(unrestricted_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1412,14 +1428,14 @@ fn grandpa_environment_respects_voting_rules() { // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( - futures::executor::block_on(three_quarters_env.best_chain_containing( + block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 16, ); assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 16, @@ -1430,7 +1446,7 @@ fn grandpa_environment_respects_voting_rules() { // the 3/4 environment should propose block 21 for voting assert_eq!( - futures::executor::block_on(three_quarters_env.best_chain_containing( + block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1439,7 +1455,7 @@ fn grandpa_environment_respects_voting_rules() { // while the default environment will always still make sure we don't vote // on the best block (2 behind) assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 19, @@ -1452,7 +1468,7 @@ fn grandpa_environment_respects_voting_rules() { // best block, there's a hard rule that we can't cast any votes lower than // the given base (#21). 
assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1466,7 +1482,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); @@ -1535,12 +1551,10 @@ fn imports_justification_for_regular_blocks_on_import() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 1); + let mut net = GrandpaTestNet::new(api.clone(), 1, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >(client.clone()); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1583,12 +1597,12 @@ fn imports_justification_for_regular_blocks_on_import() { // we import the block with justification attached let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(justification.encode()); + import.justifications = Some((GRANDPA_ENGINE_ID, justification.encode()).into()); import.body = Some(block.extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); assert_eq!( - block_import.import_block(import, HashMap::new()).unwrap(), + block_on(block_import.import_block(import, HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: false, clear_justification_requests: false, @@ -1600,7 +1614,7 @@ fn imports_justification_for_regular_blocks_on_import() { // the justification should be imported and available from the client assert!( - client.justification(&BlockId::Hash(block_hash)).unwrap().is_some(), + client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some(), ); } @@ -1612,7 +1626,7 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let voters = make_ids(&[alice]); let environment = { - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index c27eab5351562..bcde68d2fb338 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -987,7 +987,7 @@ mod tests { threads_pool.spawn_ok(until_imported.into_future().map(|_| ())); // assert that we will make sync 
requests - let assert = futures::future::poll_fn(|_| { + let assert = futures::future::poll_fn(|ctx| { let block_sync_requests = block_sync_requester.requests.lock(); // we request blocks targeted by the precommits that aren't imported @@ -997,6 +997,11 @@ mod tests { return Poll::Ready(()); } + // NOTE: nothing in this function is future-aware (i.e nothing gets registered to wake + // up this future), we manually wake up this task to avoid having to wait until the + // timeout below triggers. + ctx.waker().wake_by_ref(); + Poll::Pending }); diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 9b3fb9b328560..3ede7649a1387 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -372,7 +372,7 @@ mod tests { .unwrap() .block; - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } let genesis = client diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 27e0754eb552e..621ada13ff61d 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -30,9 +30,9 @@ use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, + StorageCollection, ChildStorageCollection, IndexOperation, }; -use sp_runtime::{generic::BlockId, Justification, Storage}; +use sp_runtime::{generic::BlockId, Justification, Justifications, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ @@ -199,6 +199,14 @@ impl ClientBackend for Backend> self.blockchain.storage().finalize_header(block) } + fn append_justification( + &self, + _block: BlockId, + _justification: 
Justification, + ) -> ClientResult<()> { + Ok(()) + } + fn blockchain(&self) -> &Blockchain { &self.blockchain } @@ -278,7 +286,7 @@ impl BlockImportOperation for ImportOperation &mut self, header: Block::Header, _body: Option>, - _justification: Option, + _justifications: Option, state: NewBlockState, ) -> ClientResult<()> { self.leaf_state = state; @@ -356,7 +364,7 @@ impl BlockImportOperation for ImportOperation fn mark_finalized( &mut self, block: BlockId, - _justification: Option, + _justifications: Option, ) -> ClientResult<()> { self.finalized_blocks.push(block); Ok(()) @@ -366,6 +374,11 @@ impl BlockImportOperation for ImportOperation self.set_head = Some(block); Ok(()) } + + fn update_transaction_index(&mut self, _index: Vec) -> sp_blockchain::Result<()> { + // noop for the light client + Ok(()) + } } impl std::fmt::Debug for GenesisOrUnavailableState { diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index bcabc365676a5..3349adf7ac693 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -21,7 +21,7 @@ use std::sync::Arc; -use sp_runtime::{Justification, generic::BlockId}; +use sp_runtime::{Justifications, generic::BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; use sp_blockchain::{ @@ -109,7 +109,7 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S Err(ClientError::NotAvailableOnLightClient) } - fn justification(&self, _id: BlockId) -> ClientResult> { + fn justifications(&self, _id: BlockId) -> ClientResult> { Err(ClientError::NotAvailableOnLightClient) } @@ -129,10 +129,10 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S Err(ClientError::NotAvailableOnLightClient) } - fn extrinsic( + fn indexed_transaction( &self, _hash: &Block::Hash, - ) -> ClientResult::Extrinsic>> { + ) -> ClientResult>> { Err(ClientError::NotAvailableOnLightClient) } } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 
b5f3b754af030..a72e65ab3f572 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,13 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } log = "0.4.8" lru = "0.6.5" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } sc-network = { version = "0.9.0", path = "../network" } sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } wasm-timer = "0.2" +tracing = "0.1.25" [dev-dependencies] async-std = "1.6.5" diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index a58432d8c2476..4c006f288f011 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -23,7 +23,6 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::iter; use std::time; -use log::{debug, error, trace}; use lru::LruCache; use libp2p::PeerId; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; @@ -146,7 +145,13 @@ fn propagate<'a, B: BlockT, I>( peer.known_messages.insert(message_hash.clone()); - trace!(target: "gossip", "Propagating to {}: {:?}", id, message); + tracing::trace!( + target: "gossip", + to = %id, + %protocol, + ?message, + "Propagating message", + ); network.write_notification(id.clone(), protocol.clone(), message.clone()); } } @@ -173,7 +178,7 @@ impl ConsensusGossip { let metrics = match metrics_registry.map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { - debug!(target: "gossip", "Failed to register metrics: {:?}", e); + tracing::debug!(target: "gossip", "Failed to register metrics: {:?}", e); None } None => None, @@ -197,7 +202,13 @@ impl ConsensusGossip { return; } - trace!(target:"gossip", "Registering {:?} {}", role, who); + 
tracing::trace!( + target:"gossip", + %who, + protocol = %self.protocol, + ?role, + "Registering peer", + ); self.peers.insert(who.clone(), PeerConsensus { known_messages: HashSet::new(), }); @@ -301,7 +312,10 @@ impl ConsensusGossip { metrics.expired_messages.inc_by(expired_messages as u64) } - trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", + tracing::trace!( + target: "gossip", + protocol = %self.protocol, + "Cleaned up {} stale messages, {} left ({} known)", expired_messages, self.messages.len(), known_messages.len(), @@ -331,14 +345,25 @@ impl ConsensusGossip { let mut to_forward = vec![]; if !messages.is_empty() { - trace!(target: "gossip", "Received {} messages from peer {}", messages.len(), who); + tracing::trace!( + target: "gossip", + messages_num = %messages.len(), + %who, + protocol = %self.protocol, + "Received messages from peer", + ); } for message in messages { let message_hash = HashFor::::hash(&message[..]); if self.known_messages.contains(&message_hash) { - trace!(target:"gossip", "Ignored already known message from {}", who); + tracing::trace!( + target: "gossip", + %who, + protocol = %self.protocol, + "Ignored already known message", + ); network.report_peer(who.clone(), rep::DUPLICATE_GOSSIP); continue; } @@ -354,7 +379,12 @@ impl ConsensusGossip { ValidationResult::ProcessAndKeep(topic) => (topic, true), ValidationResult::ProcessAndDiscard(topic) => (topic, false), ValidationResult::Discard => { - trace!(target:"gossip", "Discard message from peer {}", who); + tracing::trace!( + target: "gossip", + %who, + protocol = %self.protocol, + "Discard message from peer", + ); continue; }, }; @@ -362,7 +392,12 @@ impl ConsensusGossip { let peer = match self.peers.get_mut(&who) { Some(peer) => peer, None => { - error!(target:"gossip", "Got message from unregistered peer {}", who); + tracing::error!( + target: "gossip", + %who, + protocol = %self.protocol, + "Got message from unregistered peer", + ); continue; } }; @@ -415,7 
+450,13 @@ impl ConsensusGossip { peer.known_messages.insert(entry.message_hash.clone()); - trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); + tracing::trace!( + target: "gossip", + to = %who, + protocol = %self.protocol, + ?entry.message, + "Sending topic message", + ); network.write_notification(who.clone(), self.protocol.clone(), entry.message.clone()); } } @@ -457,7 +498,13 @@ impl ConsensusGossip { let message_hash = HashFor::::hash(&message); - trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); + tracing::trace!( + target: "gossip", + to = %who, + protocol = %self.protocol, + ?message, + "Sending direct message", + ); peer.known_messages.insert(message_hash); network.write_notification(who.clone(), self.protocol.clone(), message); diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 3d8c33eae0f25..604165d10074d 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -63,17 +63,17 @@ wasm-timer = "0.2" zeroize = "1.2.0" [dependencies.libp2p] -version = "0.35.1" +version = "0.36.0" [target.'cfg(target_os = "unknown")'.dependencies.libp2p] -version = "0.35.1" +version = "0.36.0" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-async-io", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } quickcheck = "1.0.3" rand = "0.7.2" sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 0eebd1713cc81..a73685ed3bf32 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -37,7 +37,7 @@ use libp2p::swarm::{ use log::debug; use prost::Message; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, 
NumberFor}, Justification}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justifications}; use std::{ borrow::Cow, collections::{HashSet, VecDeque}, @@ -84,7 +84,7 @@ pub struct Behaviour { /// Event generated by `Behaviour`. pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justification), + JustificationImport(Origin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. RandomKademliaStarted(ProtocolId), diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs index 7129f3dbe07b1..aea2b8420cb2c 100644 --- a/client/network/src/bitswap.rs +++ b/client/network/src/bitswap.rs @@ -25,7 +25,6 @@ use std::io; use std::sync::Arc; use std::task::{Context, Poll}; use cid::Version; -use codec::Encode; use core::pin::Pin; use futures::Future; use futures::io::{AsyncRead, AsyncWrite}; @@ -257,15 +256,15 @@ impl NetworkBehaviour for Bitswap { } let mut hash = B::Hash::default(); hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let extrinsic = match self.client.extrinsic(&hash) { + let transaction = match self.client.indexed_transaction(&hash) { Ok(ex) => ex, Err(e) => { - error!(target: LOG_TARGET, "Error retrieving extrinsic {}: {}", hash, e); + error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); None } }; - match extrinsic { - Some(extrinsic) => { + match transaction { + Some(transaction) => { trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); if entry.want_type == WantType::Block as i32 { let prefix = Prefix { @@ -276,7 +275,7 @@ impl NetworkBehaviour for Bitswap { }; response.payload.push(MessageBlock { prefix: prefix.to_bytes(), - data: extrinsic.encode(), + data: transaction, }); } else { response.block_presences.push(BlockPresence { diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs index 148bc01302f7f..2cc888c220f62 100644 --- 
a/client/network/src/block_request_handler.rs +++ b/client/network/src/block_request_handler.rs @@ -275,12 +275,28 @@ impl BlockRequestHandler { let number = *header.number(); let hash = header.hash(); let parent_hash = *header.parent_hash(); - let justification = if get_justification { - self.client.justification(&BlockId::Hash(hash))? + let justifications = if get_justification { + self.client.justifications(&BlockId::Hash(hash))? } else { None }; - let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); + + // TODO: In a follow up PR tracked by https://github.com/paritytech/substrate/issues/8172 + // we want to send/receive all justifications. + // For now we keep compatibility by selecting precisely the GRANDPA one, and not just + // the first one. When sending we could have just taken the first one, since we don't + // expect there to be any other kind currently, but when receiving we need to add the + // engine ID tag. + // The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and will be + // removed when resolving the above issue. + let justification = justifications.and_then(|just| just.into_justification(*b"FRNK")); + + let is_empty_justification = justification + .as_ref() + .map(|j| j.is_empty()) + .unwrap_or(false); + + let justification = justification.unwrap_or_default(); let body = if get_body { match self.client.block_body(&BlockId::Hash(hash))? 
{ @@ -306,7 +322,7 @@ impl BlockRequestHandler { body, receipt: Vec::new(), message_queue: Vec::new(), - justification: justification.unwrap_or_default(), + justification, is_empty_justification, }; diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 87b533ef77dc1..b7c791e392676 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -61,7 +61,7 @@ use libp2p::kad::handler::KademliaHandlerProto; use libp2p::kad::QueryId; use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; #[cfg(not(target_os = "unknown"))] -use libp2p::mdns::{Mdns, MdnsEvent}; +use libp2p::mdns::{Mdns, MdnsConfig, MdnsEvent}; use libp2p::multiaddr::Protocol; use log::{debug, info, trace, warn}; use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, num::NonZeroUsize, time::Duration}; @@ -220,7 +220,7 @@ impl DiscoveryConfig { discovery_only_if_under_num, #[cfg(not(target_os = "unknown"))] mdns: if enable_mdns { - MdnsWrapper::Instantiating(Mdns::new().boxed()) + MdnsWrapper::Instantiating(Mdns::new(MdnsConfig::default()).boxed()) } else { MdnsWrapper::Disabled }, diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index 89ad5fcf047dc..b000cf575ddb3 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -47,12 +47,14 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) #[derive(Clone)] struct PassThroughVerifier(bool); + + #[async_trait::async_trait] impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: sp_consensus::BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option>, ) -> Result< ( @@ -79,7 +81,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) let mut import = sp_consensus::BlockImportParams::new(origin, header); import.body = body; import.finalized = self.0; - import.justification = 
justification; + import.justifications = justifications; import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 7f321775b160e..84b5285b38ada 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -27,7 +27,7 @@ use crate::{ use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, prelude::*}; -use generic_proto::{GenericProto, GenericProtoOut}; +use notifications::{Notifications, NotificationsOut}; use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::request_response::OutboundFailure; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; @@ -43,9 +43,10 @@ use sp_consensus::{ block_validation::BlockAnnounceValidator, import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} }; -use sp_runtime::{generic::BlockId, Justification}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub +use sp_runtime::{ + Justifications, + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub}, }; use sp_arithmetic::traits::SaturatedConversion; use sync::{ChainSync, SyncState}; @@ -55,13 +56,13 @@ use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; -mod generic_proto; +mod notifications; pub mod message; pub mod event; pub mod sync; -pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError}; +pub use notifications::{NotificationsSink, Ready, NotifsHandlerError}; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); @@ -160,7 +161,7 @@ pub struct Protocol { /// Used to report reputation changes. 
peerset_handle: sc_peerset::PeersetHandle, /// Handles opening the unique substream and sending and receiving raw messages. - behaviour: GenericProto, + behaviour: Notifications, /// List of notifications protocols that have been registered. notification_protocols: Vec>, /// If we receive a new "substream open" event that contains an invalid handshake, we ask the @@ -361,7 +362,7 @@ impl Protocol { genesis_hash, ).encode(); - GenericProto::new( + Notifications::new( peerset, iter::once((block_announces_protocol, block_announces_handshake, MAX_BLOCK_ANNOUNCE_SIZE)) .chain(network_config.extra_sets.iter() @@ -612,8 +613,8 @@ impl Protocol { if request.fields == message::BlockAttributes::JUSTIFICATION { match self.sync.on_block_justification(peer_id, block_response) { Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockJustification::Import { peer, hash, number, justification }) => - CustomMessageOutcome::JustificationImport(peer, hash, number, justification), + Ok(sync::OnBlockJustification::Import { peer, hash, number, justifications }) => + CustomMessageOutcome::JustificationImport(peer, hash, number, justifications), Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -664,7 +665,7 @@ impl Protocol { if status.genesis_hash != self.genesis_hash { log!( target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, "Peer is on different chain (our genesis: {} theirs: {})", self.genesis_hash, status.genesis_hash ); @@ -1134,7 +1135,7 @@ fn prepare_block_request( #[must_use] pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justification), + JustificationImport(Origin, B::Hash, NumberFor, Justifications), /// Notification protocols have been opened with a remote. 
NotificationStreamOpened { remote: PeerId, @@ -1168,7 +1169,7 @@ pub enum CustomMessageOutcome { } impl NetworkBehaviour for Protocol { - type ProtocolsHandler = ::ProtocolsHandler; + type ProtocolsHandler = ::ProtocolsHandler; type OutEvent = CustomMessageOutcome; fn new_handler(&mut self) -> Self::ProtocolsHandler { @@ -1331,7 +1332,7 @@ impl NetworkBehaviour for Protocol { }; let outcome = match event { - GenericProtoOut::CustomProtocolOpen { peer_id, set_id, received_handshake, notifications_sink, .. } => { + NotificationsOut::CustomProtocolOpen { peer_id, set_id, received_handshake, notifications_sink, .. } => { // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { // `received_handshake` can be either a `Status` message if received from the @@ -1418,7 +1419,7 @@ impl NetworkBehaviour for Protocol { } } } - GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { + NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { if set_id == HARDCODED_PEERSETS_SYNC { CustomMessageOutcome::None } else if self.bad_handshake_substreams.contains(&(peer_id.clone(), set_id)) { @@ -1431,7 +1432,7 @@ impl NetworkBehaviour for Protocol { } } }, - GenericProtoOut::CustomProtocolClosed { peer_id, set_id } => { + NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { if self.on_sync_peer_disconnected(peer_id.clone()).is_ok() { @@ -1456,7 +1457,7 @@ impl NetworkBehaviour for Protocol { } } }, - GenericProtoOut::Notification { peer_id, set_id, message } => + NotificationsOut::Notification { peer_id, set_id, message } => match set_id { HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ed2721032801c..01e9a5d7215af 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -148,7 +148,7 @@ pub struct RemoteReadResponse { pub mod generic { use bitflags::bitflags; use codec::{Encode, Decode, Input, Output}; - use sp_runtime::Justification; + use sp_runtime::EncodedJustification; use super::{ RemoteReadResponse, Transactions, Direction, RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, @@ -233,7 +233,7 @@ pub mod generic { /// Block message queue if requested. pub message_queue: Option>, /// Justification if requested. - pub justification: Option, + pub justification: Option, } /// Identifies starting point of a block sequence. diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/notifications.rs similarity index 71% rename from client/network/src/protocol/generic_proto.rs rename to client/network/src/protocol/notifications.rs index a305fc1f5ea5f..ef25795758b80 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/notifications.rs @@ -16,13 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Implementation of libp2p's `NetworkBehaviour` trait that opens a single substream with the -//! remote and then allows any communication with them. -//! -//! 
The `Protocol` struct uses `GenericProto` in order to open substreams with the rest of the -//! network, then performs the Substrate protocol handling on top. +//! Implementation of libp2p's `NetworkBehaviour` trait that establishes communications and opens +//! notifications substreams. -pub use self::behaviour::{GenericProto, GenericProtoOut}; +pub use self::behaviour::{Notifications, NotificationsOut}; pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready}; mod behaviour; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs similarity index 98% rename from client/network/src/protocol/generic_proto/behaviour.rs rename to client/network/src/protocol/notifications/behaviour.rs index 05247dc6f0e68..08c4ec5d4f7b3 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::protocol::generic_proto::{ +use crate::protocol::notifications::{ handler::{NotificationsSink, NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn} }; @@ -44,7 +44,7 @@ use wasm_timer::Instant; /// /// # How it works /// -/// The role of the `GenericProto` is to synchronize the following components: +/// The role of the `Notifications` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. /// - The connection handler (see `group.rs`) that handles individual connections. @@ -83,9 +83,9 @@ use wasm_timer::Instant; /// different than a single connection failing and being re-established /// in terms of potential reordering and dropped messages. Messages can /// be received on any connection. -/// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the +/// 3. 
The behaviour reports `NotificationsOut::CustomProtocolOpen` when the /// first connection reports `NotifsHandlerOut::OpenResultOk`. -/// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the +/// 4. The behaviour reports `NotificationsOut::CustomProtocolClosed` when the /// last connection reports `NotifsHandlerOut::ClosedResult`. /// /// In this way, the number of actual established connections to the peer is @@ -94,7 +94,7 @@ use wasm_timer::Instant; /// and only as a result of simultaneous dialing. However, the implementation /// accommodates for any number of connections. /// -pub struct GenericProto { +pub struct Notifications { /// Notification protocols. Entries are only ever added and not removed. /// Contains, for each protocol, the protocol name and the message to send as part of the /// initial handshake. @@ -127,7 +127,7 @@ pub struct GenericProto { next_incoming_index: sc_peerset::IncomingIndex, /// Events to produce from `poll()`. - events: VecDeque>, + events: VecDeque>, } /// Identifier for a delay firing. @@ -302,9 +302,9 @@ struct IncomingPeer { incoming_id: sc_peerset::IncomingIndex, } -/// Event that can be emitted by the `GenericProto`. +/// Event that can be emitted by the `Notifications`. #[derive(Debug)] -pub enum GenericProtoOut { +pub enum NotificationsOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { /// Id of the peer we are connected to. @@ -354,7 +354,7 @@ pub enum GenericProtoOut { }, } -impl GenericProto { +impl Notifications { /// Creates a `CustomProtos`. 
pub fn new( peerset: sc_peerset::Peerset, @@ -366,7 +366,7 @@ impl GenericProto { assert!(!notif_protocols.is_empty()); - GenericProto { + Notifications { notif_protocols, peerset, peers: FnvHashMap::default(), @@ -462,7 +462,7 @@ impl GenericProto { if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); - let event = GenericProtoOut::CustomProtocolClosed { + let event = NotificationsOut::CustomProtocolClosed { peer_id: peer_id.clone(), set_id, }; @@ -828,7 +828,7 @@ impl GenericProto { if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); - let event = GenericProtoOut::CustomProtocolClosed { + let event = NotificationsOut::CustomProtocolClosed { peer_id: entry.key().0.clone(), set_id, }; @@ -1013,9 +1013,9 @@ impl GenericProto { } } -impl NetworkBehaviour for GenericProto { +impl NetworkBehaviour for Notifications { type ProtocolsHandler = NotifsHandlerProto; - type OutEvent = GenericProtoOut; + type OutEvent = NotificationsOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { NotifsHandlerProto::new(self.notif_protocols.clone()) @@ -1265,7 +1265,7 @@ impl NetworkBehaviour for GenericProto { "External API <= Sink replaced({}, {:?})", peer_id, set_id ); - let event = GenericProtoOut::CustomProtocolReplaced { + let event = NotificationsOut::CustomProtocolReplaced { peer_id: peer_id.clone(), set_id, notifications_sink: replacement_sink, @@ -1277,7 +1277,7 @@ impl NetworkBehaviour for GenericProto { target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id ); - let event = GenericProtoOut::CustomProtocolClosed { + let event = NotificationsOut::CustomProtocolClosed { peer_id: peer_id.clone(), set_id, }; @@ -1642,7 +1642,7 @@ impl NetworkBehaviour for GenericProto { { if pos <= replacement_pos { debug!(target: "sub-libp2p", "External API 
<= Sink replaced({:?})", source); - let event = GenericProtoOut::CustomProtocolReplaced { + let event = NotificationsOut::CustomProtocolReplaced { peer_id: source, set_id, notifications_sink: replacement_sink, @@ -1665,7 +1665,7 @@ impl NetworkBehaviour for GenericProto { } debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id); - let event = GenericProtoOut::CustomProtocolClosed { + let event = NotificationsOut::CustomProtocolClosed { peer_id: source, set_id, }; @@ -1739,7 +1739,7 @@ impl NetworkBehaviour for GenericProto { { if !any_open { debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { + let event = NotificationsOut::CustomProtocolOpen { peer_id: source, set_id, received_handshake, @@ -1876,7 +1876,7 @@ impl NetworkBehaviour for GenericProto { ); trace!(target: "sub-libp2p", "External API <= Message({}, {:?})", source, set_id); - let event = GenericProtoOut::Notification { + let event = NotificationsOut::Notification { peer_id: source, set_id, message, diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/notifications/handler.rs similarity index 92% rename from client/network/src/protocol/generic_proto/handler.rs rename to client/network/src/protocol/notifications/handler.rs index 0db249f90a8b7..99677cc45e54e 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -57,7 +57,7 @@ //! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted //! [`NotifsHandlerIn::Open`] has gotten an answer. 
-use crate::protocol::generic_proto::{ +use crate::protocol::notifications::{ upgrade::{ NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, NotificationsHandshakeError, UpgradeCollec @@ -188,10 +188,10 @@ enum State { /// We use two different channels in order to have two different channel sizes, but from /// the receiving point of view, the two channels are the same. /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. - notifications_sink_rx: stream::Select< + notifications_sink_rx: stream::Peekable>, stream::Fuse> - >, + >>, /// Outbound substream that has been accepted by the remote. /// @@ -552,7 +552,7 @@ impl ProtocolsHandler for NotifsHandler { }; self.protocols[protocol_index].state = State::Open { - notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()).peekable(), out_substream: Some(substream), in_substream: in_substream.take(), }; @@ -716,8 +716,80 @@ impl ProtocolsHandler for NotifsHandler { return Poll::Ready(ev); } + // For each open substream, try send messages from `notifications_sink_rx` to the + // substream. + for protocol_index in 0..self.protocols.len() { + if let State::Open { notifications_sink_rx, out_substream: Some(out_substream), .. } + = &mut self.protocols[protocol_index].state + { + loop { + // Only proceed with `out_substream.poll_ready_unpin` if there is an element + // available in `notifications_sink_rx`. This avoids waking up the task when + // a substream is ready to send if there isn't actually something to send. + match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { + Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => { + return Poll::Ready( + ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) + ); + }, + Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. 
})) => {}, + Poll::Ready(None) | Poll::Pending => break, + } + + // Before we extract the element from `notifications_sink_rx`, check that the + // substream is ready to accept a message. + match out_substream.poll_ready_unpin(cx) { + Poll::Ready(_) => {}, + Poll::Pending => break + } + + // Now that the substream is ready for a message, grab what to send. + let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => message, + Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) + | Poll::Ready(None) + | Poll::Pending => { + // Should never be reached, as per `poll_peek` above. + debug_assert!(false); + break; + } + }; + + let _ = out_substream.start_send_unpin(message); + // Note that flushing is performed later down this function. + } + } + } + + // Flush all outbound substreams. + // When `poll` returns `Poll::Ready`, the libp2p `Swarm` may decide to no longer call + // `poll` again before it is ready to accept more events. + // In order to make sure that substreams are flushed as soon as possible, the flush is + // performed before the code paths that can produce `Ready` (with some rare exceptions). + // Importantly, however, the flush is performed *after* notifications are queued with + // `Sink::start_send`. + for protocol_index in 0..self.protocols.len() { + match &mut self.protocols[protocol_index].state { + State::Open { out_substream: out_substream @ Some(_), .. } => { + match Sink::poll_flush(Pin::new(out_substream.as_mut().unwrap()), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => { + *out_substream = None; + let event = NotifsHandlerOut::CloseDesired { protocol_index }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)); + } + }; + } + + State::Closed { .. } | + State::Opening { .. } | + State::Open { out_substream: None, .. } | + State::OpenDesiredByRemote { .. } => {} + } + } + + // Poll inbound substreams. 
for protocol_index in 0..self.protocols.len() { - // Poll inbound substreams. // Inbound substreams being closed is always tolerated, except for the // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. match &mut self.protocols[protocol_index].state { @@ -763,68 +835,11 @@ impl ProtocolsHandler for NotifsHandler { } } } - - // Poll outbound substream. - match &mut self.protocols[protocol_index].state { - State::Open { out_substream: out_substream @ Some(_), .. } => { - match Sink::poll_flush(Pin::new(out_substream.as_mut().unwrap()), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - *out_substream = None; - let event = NotifsHandlerOut::CloseDesired { protocol_index }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(event)); - } - }; - } - - State::Closed { .. } | - State::Opening { .. } | - State::Open { out_substream: None, .. } | - State::OpenDesiredByRemote { .. } => {} - } - - if let State::Open { notifications_sink_rx, out_substream: Some(out_substream), .. } - = &mut self.protocols[protocol_index].state - { - loop { - // Before we poll the notifications sink receiver, check that the substream - // is ready to accept a message. - match out_substream.poll_ready_unpin(cx) { - Poll::Ready(_) => {}, - Poll::Pending => break - } - - // Now that all substreams are ready for a message, grab what to send. - let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) | Poll::Pending => break, - }; - - match message { - NotificationsSinkMessage::Notification { message } => { - let _ = out_substream.start_send_unpin(message); - - // Calling `start_send_unpin` only queues the message. Actually - // emitting the message is done with `poll_flush`. In order to - // not introduce too much complexity, this flushing is done earlier - // in the body of this `poll()` method. 
As such, we schedule a task - // wake-up now in order to guarantee that `poll()` will be called - // again and the flush happening. - // At the time of the writing of this comment, a rewrite of this - // code is being planned. If you find this comment in the wild and - // the rewrite didn't happen, please consider a refactor. - cx.waker().wake_by_ref(); - } - NotificationsSinkMessage::ForceClose => { - return Poll::Ready( - ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) - ); - } - } - } - } } + // This is the only place in this method that can return `Pending`. + // By putting it at the very bottom, we are guaranteed that everything has been properly + // polled. Poll::Pending } } diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/notifications/tests.rs similarity index 92% rename from client/network/src/protocol/generic_proto/tests.rs rename to client/network/src/protocol/notifications/tests.rs index 2c80fe8523ac3..f159a8e631782 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -18,7 +18,7 @@ #![cfg(test)] -use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; +use crate::protocol::notifications::{Notifications, NotificationsOut}; use futures::prelude::*; use libp2p::{PeerId, Multiaddr, Transport}; @@ -80,7 +80,7 @@ fn build_nodes() -> (Swarm, Swarm) { }); let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(peerset, iter::once(("/foo".into(), Vec::new(), 1024 * 1024))), + inner: Notifications::new(peerset, iter::once(("/foo".into(), Vec::new(), 1024 * 1024))), addrs: addrs .iter() .enumerate() @@ -110,12 +110,12 @@ fn build_nodes() -> (Swarm, Swarm) { /// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. 
struct CustomProtoWithAddr { - inner: GenericProto, + inner: Notifications, addrs: Vec<(PeerId, Multiaddr)>, } impl std::ops::Deref for CustomProtoWithAddr { - type Target = GenericProto; + type Target = Notifications; fn deref(&self) -> &Self::Target { &self.inner @@ -129,8 +129,8 @@ impl std::ops::DerefMut for CustomProtoWithAddr { } impl NetworkBehaviour for CustomProtoWithAddr { - type ProtocolsHandler = ::ProtocolsHandler; - type OutEvent = ::OutEvent; + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = ::OutEvent; fn new_handler(&mut self) -> Self::ProtocolsHandler { self.inner.new_handler() @@ -240,7 +240,7 @@ fn reconnect_after_disconnect() { }; match event { - future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. }) => { + future::Either::Left(NotificationsOut::CustomProtocolOpen { .. }) => { match service1_state { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; @@ -255,14 +255,14 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. }) => { + future::Either::Left(NotificationsOut::CustomProtocolClosed { .. }) => { match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | ServiceState::Disconnected => panic!(), } }, - future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. }) => { + future::Either::Right(NotificationsOut::CustomProtocolOpen { .. }) => { match service2_state { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; @@ -277,7 +277,7 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. }) => { + future::Either::Right(NotificationsOut::CustomProtocolClosed { .. 
}) => { match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | @@ -310,8 +310,8 @@ fn reconnect_after_disconnect() { }; match event { - GenericProtoOut::CustomProtocolOpen { .. } | - GenericProtoOut::CustomProtocolClosed { .. } => panic!(), + NotificationsOut::CustomProtocolOpen { .. } | + NotificationsOut::CustomProtocolClosed { .. } => panic!(), _ => {} } } diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/notifications/upgrade.rs similarity index 100% rename from client/network/src/protocol/generic_proto/upgrade.rs rename to client/network/src/protocol/notifications/upgrade.rs diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/notifications/upgrade/collec.rs similarity index 100% rename from client/network/src/protocol/generic_proto/upgrade/collec.rs rename to client/network/src/protocol/notifications/upgrade/collec.rs diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs similarity index 100% rename from client/network/src/protocol/generic_proto/upgrade/notifications.rs rename to client/network/src/protocol/notifications/upgrade/notifications.rs diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 35f840152217f..d7d0f66750b6f 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -44,7 +44,7 @@ use extra_requests::ExtraRequests; use libp2p::PeerId; use log::{debug, trace, warn, info, error}; use sp_runtime::{ - Justification, + EncodedJustification, Justifications, generic::BlockId, traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, CheckedSub, SaturatedConversion, @@ -425,7 +425,7 @@ pub enum OnBlockJustification { peer: PeerId, hash: B::Hash, number: NumberFor, - 
justification: Justification + justifications: Justifications } } @@ -823,11 +823,13 @@ impl ChainSync { .drain(self.best_queued_number + One::one()) .into_iter() .map(|block_data| { + let justifications = + legacy_justification_mapping(block_data.block.justification); IncomingBlock { hash: block_data.block.hash, header: block_data.block.header, body: block_data.block.body, - justification: block_data.block.justification, + justifications, origin: block_data.origin, allow_missing_state: true, import_existing: false, @@ -846,7 +848,7 @@ impl ChainSync { hash: b.hash, header: b.header, body: b.body, - justification: b.justification, + justifications: legacy_justification_mapping(b.justification), origin: Some(who.clone()), allow_missing_state: true, import_existing: false, @@ -955,7 +957,7 @@ impl ChainSync { hash: b.hash, header: b.header, body: b.body, - justification: b.justification, + justifications: legacy_justification_mapping(b.justification), origin: Some(who.clone()), allow_missing_state: true, import_existing: false, @@ -1039,8 +1041,11 @@ impl ChainSync { None }; - if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { - return Ok(OnBlockJustification::Import { peer, hash, number, justification: j }) + if let Some((peer, hash, number, j)) = self + .extra_justifications + .on_response(who, legacy_justification_mapping(justification)) + { + return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) } } @@ -1547,11 +1552,13 @@ impl ChainSync { debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(id, p)| { + old_peers.into_iter().filter_map(move |(id, mut p)| { // peers that were downloading justifications // should be kept in that state. 
match p.state { PeerSyncState::DownloadingJustification(_) => { + // We make sure our common number is at least something we have. + p.common_number = info.best_number; self.peers.insert(id, p); return None; } @@ -1597,6 +1604,14 @@ impl ChainSync { } } +// This is purely during a backwards compatible transitionary period and should be removed +// once we can assume all nodes can send and receive multiple Justifications +// The ID tag is hardcoded here to avoid depending on the GRANDPA crate. +// TODO: https://github.com/paritytech/substrate/issues/8172 +fn legacy_justification_mapping(justification: Option) -> Option { + justification.map(|just| (*b"FRNK", just).into()) +} + #[derive(Debug)] pub(crate) struct Metrics { pub(crate) queued_blocks: u32, @@ -2003,7 +2018,7 @@ mod test { let mut new_blocks = |n| { for _ in 0..n { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); } let info = client.info(); @@ -2051,6 +2066,14 @@ mod test { sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); + + // Set common block to something that we don't have (e.g. failed import) + sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; + let _ = sync.restart().count(); + assert_eq!( + sync.peers.get(&peer_id3).unwrap().common_number, + 50 + ); } /// Send a block annoucnement for the given `header`. 
@@ -2134,7 +2157,7 @@ mod test { let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); block } @@ -2175,7 +2198,7 @@ mod test { let block = block_builder.build().unwrap().block; if import { - client2.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); } block @@ -2200,7 +2223,7 @@ mod test { send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); // Import and tell sync that we now have the fork. - client.import(BlockOrigin::Own, block3_fork.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); sync.update_chain_info(&block3_fork.hash(), 3); let block4 = build_block_at(block3_fork.hash(), false); @@ -2312,7 +2335,7 @@ mod test { resp_blocks.into_iter() .rev() - .for_each(|b| client.import_as_final(BlockOrigin::Own, b).unwrap()); + .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); } // Let peer2 announce that it finished syncing @@ -2375,7 +2398,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] .into_iter() - .inspect(|b| client.import(BlockOrigin::Own, (*b).clone()).unwrap()) + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) .cloned() .collect::>(); @@ -2396,7 +2419,8 @@ mod test { ); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); - client.finalize_block(BlockId::Hash(finalized_block.hash()), Some(Vec::new())).unwrap(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); @@ -2478,7 +2502,7 @@ mod test { resp_blocks.into_iter() .rev() - .for_each(|b| 
client.import(BlockOrigin::Own, b).unwrap()); + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); } // Request the tip diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 74ce9316fc41c..7ea66799bad35 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -647,6 +647,13 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); } + /// Adds an address known to a node. + pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. /// Has no effect if the notifications channel with this protocol name is not open. /// @@ -1452,11 +1459,11 @@ impl Future for NetworkWorker { } this.import_queue.import_blocks(origin, blocks); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justifications))) => { if let Some(metrics) = this.metrics.as_ref() { metrics.import_queue_justifications_submitted.inc(); } - this.import_queue.import_justification(origin, hash, nb, justification); + this.import_queue.import_justifications(origin, hash, nb, justifications); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
})) => { if let Some(metrics) = this.metrics.as_ref() { diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 660eac82c4c60..dd4a0597cbcbc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -47,12 +47,14 @@ fn build_test_full_node(config: config::NetworkConfiguration) #[derive(Clone)] struct PassThroughVerifier(bool); + + #[async_trait::async_trait] impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: sp_consensus::BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option>, ) -> Result< ( @@ -79,7 +81,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) let mut import = sp_consensus::BlockImportParams::new(origin, header); import.body = body; import.finalized = self.0; - import.justification = justification; + import.justifications = justifications; import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 12c82c0fcefd0..ab587e01a875b 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -63,10 +63,11 @@ pub fn build_transport( let desktop_trans = tcp::TcpConfig::new().nodelay(true); let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) .or_transport(desktop_trans); - OptionalTransport::some(if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { + let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans.clone())); + OptionalTransport::some(if let Ok(dns) = dns_init { EitherTransport::Left(dns) } else { - EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Underlying)) + EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport)) }) } else { OptionalTransport::none() diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 
009315084cc32..4fc1aa740040d 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -20,7 +20,7 @@ parking_lot = "0.11.1" futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.9.0", path = "../../consensus/common" } sc-client-api = { version = "3.0.0", path = "../../api" } @@ -34,3 +34,4 @@ substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtim tempfile = "3.1.0" sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } +async-trait = "0.1.42" diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 4000e53420b4a..b3641d4b41214 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -26,22 +26,23 @@ use substrate_test_runtime_client::{self, prelude::*}; use substrate_test_runtime_client::runtime::{Block, Hash}; use sp_runtime::generic::BlockId; use sc_block_builder::BlockBuilderProvider; +use futures::executor::block_on; use super::*; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { let mut client = substrate_test_runtime_client::new(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::File, block).unwrap(); + block_on(client.import(BlockOrigin::File, block)).unwrap(); let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justification = client.justification(&BlockId::Number(1)).unwrap(); + let justifications = client.justifications(&BlockId::Number(1)).unwrap(); let peer_id = PeerId::random(); (client, 
hash, number, peer_id.clone(), IncomingBlock { hash, header, body: Some(Vec::new()), - justification, + justifications, origin: Some(peer_id.clone()), allow_missing_state: false, import_existing: false, @@ -55,12 +56,12 @@ fn import_single_good_block_works() { let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} r @ _ => panic!("{:?}", r) @@ -70,12 +71,12 @@ fn import_single_good_block_works() { #[test] fn import_single_good_known_block_is_ignored() { let (mut client, _hash, number, _, block) = prepare_good_block(); - match import_single_block( + match block_on(import_single_block( &mut client, BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {} _ => panic!() } @@ -85,12 +86,12 @@ fn import_single_good_known_block_is_ignored() { fn import_single_good_block_without_header_fails() { let (_, _, _, peer_id, mut block) = prepare_good_block(); block.header = None; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} _ => panic!() } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index c8b442d0dd563..5e05f57175490 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,8 +23,7 @@ mod block_import; mod sync; use std::{ - borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, - task::{Poll, Context as FutureContext} + borrow::Cow, 
collections::HashMap, pin::Pin, sync::Arc, task::{Poll, Context as FutureContext} }; use libp2p::build_multiaddr; @@ -63,8 +62,8 @@ use sp_core::H256; use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::Justification; -use substrate_test_runtime_client::{self, AccountKeyring}; +use sp_runtime::{Justification, Justifications}; +use substrate_test_runtime_client::AccountKeyring; use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; @@ -104,12 +103,13 @@ impl PassThroughVerifier { } /// This `Verifier` accepts all data as valid. +#[async_trait::async_trait] impl Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option> ) -> Result<(BlockImportParams, Option)>>), String> { let maybe_keys = header.digest() @@ -120,7 +120,7 @@ impl Verifier for PassThroughVerifier { let mut import = BlockImportParams::new(origin, header); import.body = body; import.finalized = self.finalized; - import.justification = justification; + import.justifications = justifications; import.fork_choice = Some(self.fork_choice.clone()); Ok((import, maybe_keys)) @@ -154,13 +154,8 @@ impl PeersClient { } } - pub fn as_block_import(&self) -> BlockImportAdapter { - match *self { - PeersClient::Full(ref client, ref _backend) => - BlockImportAdapter::new_full(client.clone()), - PeersClient::Light(ref client, ref _backend) => - BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), - } + pub fn as_block_import(&self) -> BlockImportAdapter { + BlockImportAdapter::new(self.clone()) } pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { @@ -184,10 +179,10 @@ impl PeersClient { } } - pub fn justification(&self, block: 
&BlockId) -> ClientResult> { + pub fn justifications(&self, block: &BlockId) -> ClientResult> { match *self { - PeersClient::Full(ref client, ref _backend) => client.justification(block), - PeersClient::Light(ref client, ref _backend) => client.justification(block), + PeersClient::Full(ref client, ref _backend) => client.justifications(block), + PeersClient::Light(ref client, ref _backend) => client.justifications(block), } } @@ -218,7 +213,36 @@ impl PeersClient { } } -pub struct Peer { +#[async_trait::async_trait] +impl BlockImport for PeersClient { + type Error = ConsensusError; + type Transaction = (); + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + match self { + PeersClient::Full(client, _) => client.check_block(block).await, + PeersClient::Light(client, _) => client.check_block(block).await, + } + } + + async fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + match self { + PeersClient::Full(client, _) => + client.import_block(block.convert_transaction(), cache).await, + PeersClient::Light(client, _) => + client.import_block(block.convert_transaction(), cache).await, + } + } +} + +pub struct Peer { pub data: D, client: PeersClient, /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, @@ -226,7 +250,7 @@ pub struct Peer { verifier: VerifierAdapter, /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, /// instead of going through the import queue. - block_import: BlockImportAdapter<()>, + block_import: BlockImportAdapter, select_chain: Option>, backend: Option>, network: NetworkWorker::Hash>, @@ -235,7 +259,10 @@ pub struct Peer { listen_addr: Multiaddr, } -impl Peer { +impl Peer where + B: BlockImport + Send + Sync, + B::Transaction: Send, +{ /// Get this peer ID. 
pub fn id(&self) -> PeerId { self.network.service().local_peer_id().clone() @@ -277,13 +304,24 @@ impl Peer { } /// Request explicit fork sync. - pub fn set_sync_fork_request(&self, peers: Vec, hash: ::Hash, number: NumberFor) { + pub fn set_sync_fork_request( + &self, + peers: Vec, + hash: ::Hash, + number: NumberFor, + ) { self.network.service().set_sync_fork_request(peers, hash, number); } /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block + pub fn generate_blocks( + &mut self, + count: usize, + origin: BlockOrigin, + edit_block: F, + ) -> H256 + where + F: FnMut(BlockBuilder) -> Block { let best_hash = self.client.info().best_hash; self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false, true, true) @@ -320,19 +358,21 @@ impl Peer { block.header.parent_hash, ); let header = block.header.clone(); - let (import_block, cache) = self.verifier.verify( + let (import_block, cache) = futures::executor::block_on(self.verifier.verify( origin, header.clone(), None, if headers_only { None } else { Some(block.extrinsics) }, - ).unwrap(); + )).unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() } else { Default::default() }; - self.block_import.import_block(import_block, cache).expect("block_import failed"); + futures::executor::block_on( + self.block_import.import_block(import_block, cache) + ).expect("block_import failed"); if announce_block { self.network.service().announce_block(hash, None); } @@ -478,120 +518,107 @@ impl Peer { } } +pub trait BlockImportAdapterFull: + BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + Send + + Sync + + Clone +{} + +impl BlockImportAdapterFull for T where + T: BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + Send + + Sync + + Clone +{} + /// Implements `BlockImport` for 
any `Transaction`. Internally the transaction is /// "converted", aka the field is set to `None`. /// /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. -pub enum BlockImportAdapter { - Full( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), - Light( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), +#[derive(Clone)] +pub struct BlockImportAdapter { + inner: I, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. - pub fn new_full( - full: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Full(Arc::new(Mutex::new(full)), PhantomData) - } - - /// Create a new instance of `Self::Light`. - pub fn new_light( - light: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Light(Arc::new(Mutex::new(light)), PhantomData) - } -} - -impl Clone for BlockImportAdapter { - fn clone(&self) -> Self { - match self { - Self::Full(full, _) => Self::Full(full.clone(), PhantomData), - Self::Light(light, _) => Self::Light(light.clone(), PhantomData), + pub fn new(inner: I) -> Self { + Self { + inner, } } } -impl BlockImport for BlockImportAdapter { +#[async_trait::async_trait] +impl BlockImport for BlockImportAdapter where + I: BlockImport + Send + Sync, + I::Transaction: Send, +{ type Error = ConsensusError; - type Transaction = Transaction; + type Transaction = (); - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - match self { - Self::Full(full, _) => full.lock().check_block(block), - Self::Light(light, _) => light.lock().check_block(block), - } + self.inner.check_block(block).await } - fn import_block( + async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, cache: HashMap>, ) -> Result { - match self { - 
Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), - Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), cache), - } + self.inner.import_block(block.convert_transaction(), cache).await } } -/// Implements `Verifier` on an `Arc>`. Used internally. -#[derive(Clone)] +/// Implements `Verifier` and keeps track of failed verifications. struct VerifierAdapter { - verifier: Arc>>>, + verifier: Arc>>>, failed_verifications: Arc>>, } +#[async_trait::async_trait] impl Verifier for VerifierAdapter { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option> ) -> Result<(BlockImportParams, Option)>>), String> { let hash = header.hash(); - self.verifier.lock().verify(origin, header, justification, body).map_err(|e| { + self.verifier.lock().await.verify(origin, header, justifications, body).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); e }) } } +impl Clone for VerifierAdapter { + fn clone(&self) -> Self { + Self { + verifier: self.verifier.clone(), + failed_verifications: self.failed_verifications.clone(), + } + } +} + impl VerifierAdapter { - fn new(verifier: Arc>>>) -> VerifierAdapter { + fn new(verifier: impl Verifier + 'static) -> Self { VerifierAdapter { - verifier, + verifier: Arc::new(futures::lock::Mutex::new(Box::new(verifier))), failed_verifications: Default::default(), } } @@ -610,10 +637,13 @@ pub struct FullPeerConfig { /// /// If `None`, it will be connected to all other peers. pub connect_to_peers: Option>, + /// Whether the full peer should have the authority role. + pub is_authority: bool, } -pub trait TestNetFactory: Sized { +pub trait TestNetFactory: Sized where >::Transaction: Send { type Verifier: 'static + Verifier; + type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; /// These two need to be implemented! 
@@ -626,23 +656,20 @@ pub trait TestNetFactory: Sized { ) -> Self::Verifier; /// Get reference to peer. - fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>( &mut self, closure: F, ); /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, Self::PeerData, - ) - { - (client.as_block_import(), None, Default::default()) - } + ); fn default_config() -> ProtocolConfig { ProtocolConfig::default() @@ -686,7 +713,7 @@ pub trait TestNetFactory: Sized { &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), @@ -743,7 +770,7 @@ pub trait TestNetFactory: Sized { }; let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Full, + role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), network_config, @@ -774,7 +801,7 @@ pub trait TestNetFactory: Sized { peers.push(Peer { data, - client: PeersClient::Full(client, backend.clone()), + client: PeersClient::Full(client.clone(), backend.clone()), select_chain: Some(longest_chain), backend: Some(backend), imported_blocks_stream, @@ -802,7 +829,7 @@ pub trait TestNetFactory: Sized { &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), @@ -984,7 +1011,7 @@ pub trait TestNetFactory: Sized { } pub struct TestNet { - peers: Vec>, + peers: Vec>, 
fork_choice: ForkChoiceStrategy, } @@ -1001,6 +1028,7 @@ impl TestNet { impl TestNetFactory for TestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -1016,15 +1044,25 @@ impl TestNetFactory for TestNet { PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) } - fn peer(&mut self, i: usize) -> &mut Peer<()> { + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) + { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut Peer<(), Self::BlockImport> { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -1050,6 +1088,7 @@ pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; fn from_config(config: &ProtocolConfig) -> Self { JustificationTestNet(TestNet::from_config(config)) @@ -1059,23 +1098,23 @@ impl TestNetFactory for JustificationTestNet { self.0.make_verifier(client, config, peer_data) } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { self.0.peer(i) } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { self.0.peers() } fn mut_peers>, + &mut Vec>, )>(&mut self, closure: F) { self.0.mut_peers(closure) } - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, Self::PeerData, ) diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index b11dbaca75e14..953639dcc0e22 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ 
-22,6 +22,7 @@ use futures::{Future, executor::block_on}; use super::*; use sp_consensus::block_validation::Validation; use substrate_test_runtime::Header; +use sp_runtime::Justifications; fn test_ancestor_search_when_common_is(n: usize) { sp_tracing::try_init_simple(); @@ -248,13 +249,14 @@ fn sync_justifications() { net.block_until_sync(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification - net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(15), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(20), Some(Vec::new()), true).unwrap(); + let just = (*b"FRNK", Vec::new()); + net.peer(0).client().finalize_block(BlockId::Number(10), Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(BlockId::Number(15), Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(BlockId::Number(20), Some(just.clone()), true).unwrap(); let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); @@ -269,10 +271,20 @@ fn sync_justifications() { net.poll(cx); for height in (10..21).step_by(5) { - if net.peer(0).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { + if net + .peer(0) + .client() + .justifications(&BlockId::Number(height)) + .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + { return Poll::Pending; } - if 
net.peer(1).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { + if net + .peer(1) + .client() + .justifications(&BlockId::Number(height)) + .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + { return Poll::Pending; } } @@ -295,7 +307,8 @@ fn sync_justifications_across_forks() { // for both and finalize the small fork instead. net.block_until_sync(); - net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); + let just = (*b"FRNK", Vec::new()); + net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(just), true).unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -303,8 +316,16 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) && - net.peer(1).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) + if net + .peer(0) + .client() + .justifications(&BlockId::Number(10)) + .unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) + && net + .peer(1) + .client() + .justifications(&BlockId::Number(10)) + .unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) } else { @@ -696,8 +717,9 @@ fn can_sync_to_peers_with_wrong_common_block() { net.block_until_connected(); // both peers re-org to the same fork without notifying each other - net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); - net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + let just = Some((*b"FRNK", Vec::new())); + net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), just.clone(), true).unwrap(); + net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), just, true).unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); @@ -948,8 
+970,8 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { net.block_until_sync(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); @@ -967,12 +989,21 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { } // Finalize the block and make the justification available. - net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); + net.peer(0).client().finalize_block( + BlockId::Number(10), + Some((*b"FRNK", Vec::new())), + true, + ).unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justification(&BlockId::Number(10)).unwrap() != Some(Vec::new()) { + if net + .peer(1) + .client() + .justifications(&BlockId::Number(10)) + .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + { return Poll::Pending; } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 717f02eccd5dc..26975edbd6b63 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -240,6 +240,7 @@ mod tests { use sp_consensus::BlockOrigin; use sc_client_api::Backend as _; use sc_block_builder::BlockBuilderProvider as _; + use futures::executor::block_on; struct TestNetwork(); @@ -331,7 +332,7 @@ mod tests { ).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); @@ -341,7 +342,7 @@ mod tests { ).unwrap(); let block = 
block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).is_none()); } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 536ec6b681753..984bfc5e835ff 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } sp-utils = { version = "3.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 31162930efc67..153e097dc8b46 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -296,7 +296,10 @@ impl Peerset { } } - peerset.alloc_slots(); + for set_index in 0..peerset.data.num_sets() { + peerset.alloc_slots(SetId(set_index)); + } + (peerset, handle) } @@ -307,7 +310,7 @@ impl Peerset { } self.data.add_no_slot_node(set_id.0, peer_id); - self.alloc_slots(); + self.alloc_slots(set_id); } fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { @@ -372,7 +375,7 @@ impl Peerset { } } else { - self.alloc_slots(); + self.alloc_slots(set_id); } } @@ -383,7 +386,7 @@ impl Peerset { pub fn add_to_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { if let peersstate::Peer::Unknown(entry) = self.data.peer(set_id.0, &peer_id) { entry.discover(); - self.alloc_slots(); + self.alloc_slots(set_id); } } @@ -500,59 +503,68 @@ impl Peerset { } } - /// Try to fill available out slots with nodes. - fn alloc_slots(&mut self) { + /// Try to fill available out slots with nodes for the given set. + fn alloc_slots(&mut self, set_id: SetId) { self.update_time(); // Try to connect to all the reserved nodes that we are not connected to. 
- for set_index in 0..self.data.num_sets() { - for reserved_node in &self.reserved_nodes[set_index].0 { - let entry = match self.data.peer(set_index, reserved_node) { - peersstate::Peer::Unknown(n) => n.discover(), - peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => continue, - }; - - match entry.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect { - set_id: SetId(set_index), - peer_id: conn.into_peer_id() - }), - Err(_) => { - // An error is returned only if no slot is available. Reserved nodes are - // marked in the state machine with a flag saying "doesn't occupy a slot", - // and as such this should never happen. - debug_assert!(false); - log::error!( - target: "peerset", - "Not enough slots to connect to reserved node" - ); - } + for reserved_node in &self.reserved_nodes[set_id.0].0 { + let entry = match self.data.peer(set_id.0, reserved_node) { + peersstate::Peer::Unknown(n) => n.discover(), + peersstate::Peer::NotConnected(n) => n, + peersstate::Peer::Connected(_) => continue, + }; + + match entry.try_outgoing() { + Ok(conn) => self.message_queue.push_back(Message::Connect { + set_id, + peer_id: conn.into_peer_id() + }), + Err(_) => { + // An error is returned only if no slot is available. Reserved nodes are + // marked in the state machine with a flag saying "doesn't occupy a slot", + // and as such this should never happen. + debug_assert!(false); + log::error!( + target: "peerset", + "Not enough slots to connect to reserved node" + ); } } } // Now, we try to connect to other nodes. - for set_index in 0..self.data.num_sets() { - // Nothing more to do if we're in reserved mode. - if self.reserved_nodes[set_index].1 { - continue; + + // Nothing more to do if we're in reserved mode. + if self.reserved_nodes[set_id.0].1 { + return; + } + + // Try to grab the next node to attempt to connect to. 
+ // Since `highest_not_connected_peer` is rather expensive to call, check beforehand + // whether we have an available slot. + while self.data.has_free_outgoing_slot(set_id.0) { + let next = match self.data.highest_not_connected_peer(set_id.0) { + Some(n) => n, + None => break + }; + + // Don't connect to nodes with an abysmal reputation. + if next.reputation() < BANNED_THRESHOLD { + break; } - // Try to grab the next node to attempt to connect to. - while let Some(next) = self.data.highest_not_connected_peer(set_index) { - // Don't connect to nodes with an abysmal reputation. - if next.reputation() < BANNED_THRESHOLD { + match next.try_outgoing() { + Ok(conn) => self.message_queue.push_back(Message::Connect { + set_id, + peer_id: conn.into_peer_id() + }), + Err(_) => { + // This branch can only be entered if there is no free slot, which is + // checked above. + debug_assert!(false); break; } - - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect { - set_id: SetId(set_index), - peer_id: conn.into_peer_id() - }), - Err(_) => break, // No more slots available. - } } } } @@ -624,7 +636,7 @@ impl Peerset { self.on_remove_from_peers_set(set_id, peer_id); } - self.alloc_slots(); + self.alloc_slots(set_id); } /// Reports an adjustment to the reputation of the given peer. diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index c79dac5e10a7b..c200d2729e16c 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -283,6 +283,11 @@ impl PeersState { } } + /// Returns `true` if there is a free outgoing slot available related to this set. + pub fn has_free_outgoing_slot(&self, set: usize) -> bool { + self.sets[set].num_out < self.sets[set].max_out + } + /// Add a node to the list of nodes that don't occupy slots. /// /// Has no effect if the node was already in the group. 
@@ -506,9 +511,7 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. - if self.state.sets[self.set].num_out >= self.state.sets[self.set].max_out - && !is_no_slot_occupy - { + if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { return Err(self); } diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index 41d4d02e33c9d..a3f3db9b7116c 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -106,7 +106,7 @@ impl ChainBackend for LightChain( hash, header: Some(header), body: Some(extrinsics), - justification: signed_block.justification, + justifications: signed_block.justifications, origin: None, allow_missing_state: false, import_existing: force, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 07e8e005fa1a8..f975961c3b4e5 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -41,7 +41,7 @@ use sc_telemetry::{ SUBSTRATE_INFO, }; use sp_runtime::{ - Justification, BuildStorage, + Justification, Justifications, BuildStorage, generic::{BlockId, SignedBlock, DigestItem}, traits::{ Block as BlockT, Header as HeaderT, Zero, NumberFor, @@ -625,7 +625,7 @@ impl Client where let BlockImportParams { origin, header, - justification, + justifications, post_digests, body, storage_changes, @@ -637,7 +637,7 @@ impl Client where .. 
} = import_block; - assert!(justification.is_some() && finalized || justification.is_none()); + assert!(justifications.is_some() && finalized || justifications.is_none()); if !intermediates.is_empty() { return Err(Error::IncompletePipeline) @@ -665,7 +665,7 @@ impl Client where origin, hash, import_headers, - justification, + justifications, body, storage_changes, new_cache, @@ -704,7 +704,7 @@ impl Client where origin: BlockOrigin, hash: Block::Hash, import_headers: PrePostHeader, - justification: Option, + justifications: Option, body: Option>, storage_changes: Option, Block>>, new_cache: HashMap>, @@ -767,6 +767,7 @@ impl Client where offchain_sc, tx, _, changes_trie_tx, + tx_index, ) = storage_changes.into_inner(); if self.config.offchain_indexing_api { @@ -775,6 +776,7 @@ impl Client where operation.op.update_db_storage(tx)?; operation.op.update_storage(main_sc.clone(), child_sc.clone())?; + operation.op.update_transaction_index(tx_index)?; if let Some(changes_trie_transaction) = changes_trie_tx { operation.op.update_changes_trie(changes_trie_transaction)?; @@ -820,7 +822,7 @@ impl Client where operation.op.set_block_data( import_headers.post().clone(), body, - justification, + justifications, leaf_state, )?; @@ -1696,6 +1698,7 @@ impl CallApiAt for Client where /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. 
+#[async_trait::async_trait] impl sp_consensus::BlockImport for &Client where B: backend::Backend, E: CallExecutor + Send + Sync, @@ -1703,6 +1706,8 @@ impl sp_consensus::BlockImport for &Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; @@ -1716,7 +1721,7 @@ impl sp_consensus::BlockImport for &Client>, new_cache: HashMap>, @@ -1740,7 +1745,7 @@ impl sp_consensus::BlockImport for &Client, ) -> Result { @@ -1796,6 +1801,7 @@ impl sp_consensus::BlockImport for &Client sp_consensus::BlockImport for Client where B: backend::Backend, E: CallExecutor + Send + Sync, @@ -1803,23 +1809,25 @@ impl sp_consensus::BlockImport for Client, >::Api: CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; - fn import_block( + async fn import_block( &mut self, import_block: BlockImportParams, new_cache: HashMap>, ) -> Result { - (&*self).import_block(import_block, new_cache) + (&*self).import_block(import_block, new_cache).await } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (&*self).check_block(block) + (&*self).check_block(block).await } } @@ -1926,9 +1934,9 @@ impl BlockBackend for Client } fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), justification) => - Some(SignedBlock { block: Block::new(header, extrinsics), justification }), + Ok(match (self.header(id)?, self.body(id)?, self.justifications(id)?) 
{ + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, }) } @@ -1937,20 +1945,20 @@ impl BlockBackend for Client Client::block_status(self, id) } - fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().justification(*id) + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justifications(*id) } fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { self.backend.blockchain().hash(number) } - fn extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result> { - self.backend.blockchain().extrinsic(hash) + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>> { + self.backend.blockchain().indexed_transaction(hash) } - fn have_extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result { - self.backend.blockchain().have_extrinsic(hash) + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + self.backend.blockchain().has_indexed_transaction(hash) } } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index e55320d6c5fb7..2108d7e26fa83 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -28,7 +28,7 @@ sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } sp-storage = { version = "3.0.0", path = "../../../primitives/storage" } sc-client-db = { version = "0.9.0", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-service = { version = "0.9.0", features = ["test-helpers"], path = "../../service" } sc-network = { version = "0.9.0", path = "../../network" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sp-runtime = { version = "3.0.0", path = 
"../../../primitives/runtime" } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index f5b2d4aac83d3..a183cbce62bdb 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -28,8 +28,9 @@ use sc_light::{ }; use std::sync::Arc; use sp_runtime::{ - traits::{BlakeTwo256, HashFor, NumberFor}, - generic::BlockId, traits::{Block as _, Header as HeaderT}, Digest, + generic::BlockId, + traits::{BlakeTwo256, Block as _, HashFor, Header as HeaderT, NumberFor}, + Digest, Justifications, }; use std::collections::HashMap; use parking_lot::Mutex; @@ -374,11 +375,11 @@ fn execution_proof_is_generated_and_checked() { for i in 1u32..3u32 { let mut digest = Digest::default(); digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); - remote_client.import_justified( + futures::executor::block_on(remote_client.import_justified( BlockOrigin::Own, remote_client.new_block(digest).unwrap().build().unwrap().block, - Default::default(), - ).unwrap(); + Justifications::from((*b"TEST", Default::default())), + )).unwrap(); } // check method that doesn't requires environment @@ -539,7 +540,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade let mut local_headers_hashes = Vec::new(); for i in 0..4 { let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); local_headers_hashes.push( remote_client.block_hash(i + 1) .map_err(|_| ClientError::Backend("TestError".into())) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 122782ee51ef5..0234f43513d56 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -36,8 +36,9 @@ use sc_client_db::{ }; use sc_block_builder::BlockBuilderProvider; use 
sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; -use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, Header as HeaderT, +use sp_runtime::{ + ConsensusEngineId, + traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, }; use substrate_test_runtime::TestAPI; use sp_state_machine::backend::Backend as _; @@ -51,12 +52,15 @@ use sp_consensus::{ }; use sp_storage::StorageKey; use sp_trie::{TrieConfiguration, trie_types::Layout}; -use sp_runtime::{generic::BlockId, DigestItem}; +use sp_runtime::{generic::BlockId, DigestItem, Justifications}; use hex_literal::hex; +use futures::executor::block_on; mod light; mod db; +const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; + native_executor_instance!( Executor, substrate_test_runtime_client::runtime::api::dispatch, @@ -105,7 +109,7 @@ pub fn prepare_client_with_key_changes() -> ( }).unwrap(); } let block = builder.build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); + block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); let trie_root = header.digest().log(DigestItem::as_changes_trie_root) @@ -360,7 +364,7 @@ fn block_builder_works_with_no_transactions() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); } @@ -379,7 +383,7 @@ fn block_builder_works_with_transactions() { }).unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -425,7 +429,7 @@ fn block_builder_does_not_include_invalid() { ); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + 
block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -473,11 +477,11 @@ fn uncles_with_only_ancestors() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let v: Vec = Vec::new(); assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); } @@ -493,7 +497,7 @@ fn uncles_with_multiple_forks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -501,7 +505,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let a3 = client.new_block_at( @@ -509,7 +513,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -517,7 +521,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -525,7 +529,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); 
+ block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -541,7 +545,7 @@ fn uncles_with_multiple_forks() { nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = client.new_block_at( @@ -549,7 +553,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -557,7 +561,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -573,7 +577,7 @@ fn uncles_with_multiple_forks() { nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut builder = client.new_block_at( @@ -589,7 +593,7 @@ fn uncles_with_multiple_forks() { nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -621,11 +625,11 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, 
a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -645,7 +649,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -653,7 +657,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let a3 = client.new_block_at( @@ -661,7 +665,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -669,7 +673,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -677,7 +681,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -693,7 +697,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = 
client.new_block_at( @@ -701,7 +705,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -709,7 +713,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -725,7 +729,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut builder = client.new_block_at( @@ -741,7 +745,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); assert_eq!(client.chain_info().best_hash, a5.hash()); @@ -949,11 +953,15 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(None, longest_chain_select.finality_target( b4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - c3.hash().clone(), Some(0)).unwrap()); + assert_eq!( + None, + longest_chain_select.finality_target(c3.hash().clone(), Some(0)).unwrap(), + ); - assert_eq!(None, longest_chain_select.finality_target( - d2.hash().clone(), Some(0)).unwrap()); + assert_eq!( + None, + longest_chain_select.finality_target(d2.hash().clone(), Some(0)).unwrap(), + ); } #[test] @@ -965,15 +973,18 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { // G -> A1 let a1 = 
client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); + assert_eq!( + a2.hash(), + longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap(), + ); } #[test] @@ -1005,7 +1016,7 @@ fn import_with_justification() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -1013,16 +1024,16 @@ fn import_with_justification() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let justification = vec![1, 2, 3]; + let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); let a3 = client.new_block_at( &BlockId::Hash(a2.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); + block_on(client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone())).unwrap(); assert_eq!( client.chain_info().finalized_hash, @@ -1030,17 +1041,17 @@ fn import_with_justification() { ); assert_eq!( - client.justification(&BlockId::Hash(a3.hash())).unwrap(), + client.justifications(&BlockId::Hash(a3.hash())).unwrap(), Some(justification), ); assert_eq!( - 
client.justification(&BlockId::Hash(a1.hash())).unwrap(), + client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None, ); assert_eq!( - client.justification(&BlockId::Hash(a2.hash())).unwrap(), + client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None, ); } @@ -1057,14 +1068,14 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1088,8 +1099,8 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { ); // importing B1 as finalized should trigger a re-org and set it as new best - let justification = vec![1, 2, 3]; - client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); + let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); + block_on(client.import_justified(BlockOrigin::Own, b1.clone(), justification)).unwrap(); assert_eq!( client.chain_info().best_hash, @@ -1114,14 +1125,14 @@ fn finalizing_diverged_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1136,14 +1147,14 @@ fn finalizing_diverged_block_should_trigger_reorg() { nonce: 0, }).unwrap(); let b1 = 
b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client.new_block_at( &BlockId::Hash(b1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // A2 is the current best since it's the longest chain assert_eq!( @@ -1181,7 +1192,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); assert_eq!( client.chain_info().best_hash, @@ -1224,7 +1235,7 @@ fn state_reverted_on_reorg() { nonce: 0, }).unwrap(); let a1 = a1.build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1239,7 +1250,7 @@ fn state_reverted_on_reorg() { }).unwrap(); let b1 = b1.build().unwrap().block; // Reorg to B1 - client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950, current_balance(&client)); let mut a2 = client.new_block_at( @@ -1255,7 +1266,7 @@ fn state_reverted_on_reorg() { }).unwrap(); let a2 = a2.build().unwrap().block; // Re-org to A2 - client.import_as_best(BlockOrigin::Own, a2).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, a2)).unwrap(); assert_eq!(980, current_balance(&client)); } @@ -1294,14 +1305,14 @@ fn doesnt_import_blocks_that_revert_finality() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), 
false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1313,11 +1324,11 @@ fn doesnt_import_blocks_that_revert_finality() { nonce: 0, }).unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized @@ -1328,7 +1339,7 @@ fn doesnt_import_blocks_that_revert_finality() { // B3 at the same height but that doesn't include it ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = ConsensusError::ClientImport( sp_blockchain::Error::RuntimeApiError( sp_api::ApiError::Application(Box::new(sp_blockchain::Error::NotInFinalizedChain)) @@ -1353,7 +1364,7 @@ fn doesnt_import_blocks_that_revert_finality() { }).unwrap(); let c1 = c1.build().unwrap().block; - let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); + let import_err = block_on(client.import(BlockOrigin::Own, c1)).err().unwrap(); let expected_err = ConsensusError::ClientImport( sp_blockchain::Error::NotInFinalizedChain.to_string() ); @@ -1364,7 +1375,6 @@ fn doesnt_import_blocks_that_revert_finality() { ); } - #[test] fn respects_block_rules() { fn run_test( @@ -1393,7 +1403,7 @@ fn respects_block_rules() { allow_missing_state: false, 
import_existing: false, }; - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) @@ -1411,11 +1421,11 @@ fn respects_block_rules() { if record_only { known_bad.insert(block_not_ok.hash()); } else { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } // Now going to the fork - client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) @@ -1433,7 +1443,7 @@ fn respects_block_rules() { if record_only { fork_rules.push((1, block_ok.hash().clone())); } - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // And now try bad fork let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) @@ -1450,7 +1460,7 @@ fn respects_block_rules() { }; if !record_only { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } } @@ -1488,8 +1498,11 @@ fn returns_status_for_pruned_blocks() { let mut client = TestClientBuilder::with_backend(backend).build(); - let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; let mut b1 = client.new_block_at(&BlockId::Number(0), 
Default::default(), false).unwrap(); @@ -1510,17 +1523,32 @@ fn returns_status_for_pruned_blocks() { import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::imported(false), + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::Unknown, + ); - client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a1.clone())).unwrap(); - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { hash: a2.hash().clone(), @@ -1530,15 +1558,30 @@ fn returns_status_for_pruned_blocks() { import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - 
assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { hash: a3.hash().clone(), number: 2, @@ -1548,12 +1591,30 @@ fn returns_status_for_pruned_blocks() { }; // a1 and a2 are both pruned at this point - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + 
BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a3.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), + BlockStatus::InChainWithState, + ); let mut check_block_b1 = BlockCheckParams { hash: b1.hash().clone(), @@ -1562,11 +1623,20 @@ fn returns_status_for_pruned_blocks() { allow_missing_state: false, import_existing: false, }; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::MissingState, + ); check_block_b1.allow_missing_state = true; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::imported(false), + ); check_block_b1.parent_hash = H256::random(); - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::UnknownParent, + ); } #[test] @@ -1597,18 +1667,18 @@ fn imports_blocks_with_changes_tries_config_change() { (1..11).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (11..12).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = 
block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (12..23).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (23..24).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); @@ -1617,24 +1687,24 @@ fn imports_blocks_with_changes_tries_config_change() { digest_levels: 1, })).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (24..26).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (26..27).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (27..28).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (28..29).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); @@ -1643,23 +1713,23 @@ fn 
imports_blocks_with_changes_tries_config_change() { digest_levels: 1, })).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (29..30).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (30..31).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (31..32).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); // now check that configuration cache works @@ -1775,7 +1845,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - client.import_block(import, Default::default()).unwrap(); + block_on(client.import_block(import, Default::default())).unwrap(); }; // after importing a block we should still have 4 notification sinks @@ -1818,14 +1888,14 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); let a2 = client.new_block_at( 
&BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1840,7 +1910,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi nonce: 0, }).unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); let b2 = client.new_block_at( &BlockId::Hash(b1.hash()), @@ -1849,7 +1919,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi ).unwrap().build().unwrap().block; // Should trigger a notification because we reorg - client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone())).unwrap(); // There should be one notification let notification = notification_stream.next().unwrap(); diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index 8466523116440..4cb4955995540 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -37,7 +37,7 @@ type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges { #[error(transparent)] Blockchain(#[from] sp_blockchain::Error), - + #[error("Failed to load the block weight for block {0:?}")] LoadingBlockWeightFailed(::Hash), @@ -94,7 +94,7 @@ impl SyncStateRpcHandler chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe, } } - + fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? 
@@ -108,7 +108,7 @@ impl SyncStateRpcHandler Ok(sc_chain_spec::LightSyncState { finalized_block_header: finalized_header, - babe_epoch_changes: self.shared_epoch_changes.lock().clone(), + babe_epoch_changes: self.shared_epoch_changes.shared_data().clone(), babe_finalized_block_weight: finalized_block_weight, grandpa_authority_set: self.shared_authority_set.clone_inner(), }) diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index ac64d34bea9fe..ab02104c15c35 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.11.1" futures = "0.3.9" wasm-timer = "0.2.5" -libp2p = { version = "0.35.1", default-features = false, features = ["dns", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.36.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "1.0.4" rand = "0.7.2" diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs index e32a29d9a950d..0aed263a7275d 100644 --- a/client/telemetry/src/transport.rs +++ b/client/telemetry/src/transport.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use futures::{ + executor::block_on, prelude::*, ready, task::{Context, Poll}, @@ -47,7 +48,7 @@ pub(crate) fn initialize_transport( // an external transport on desktop and the fallback is used all the time. 
#[cfg(not(target_os = "unknown"))] let transport = transport.or_transport({ - let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; + let inner = block_on(libp2p::dns::DnsConfig::system(libp2p::tcp::TcpConfig::new()))?; libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { let connec = connec .with(|item| { diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index 25fd2f3ba3d70..5e7a5246cca00 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -43,6 +43,8 @@ pub struct EventFormat { pub display_thread_name: bool, /// Enable ANSI terminal colors for formatted output. pub enable_color: bool, + /// Duplicate INFO, WARN and ERROR messages to stdout. + pub dup_to_stdout: bool, } impl EventFormat @@ -123,7 +125,19 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { - self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) + if self.dup_to_stdout && ( + event.metadata().level() == &Level::INFO || + event.metadata().level() == &Level::WARN || + event.metadata().level() == &Level::ERROR + ) { + let mut out = String::new(); + self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; + writer.write_str(&out)?; + print!("{}", out); + Ok(()) + } else { + self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) + } } } diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 187b6a387f328..1023879e3d7f0 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -167,6 +167,7 @@ where display_level: !simple, display_thread_name: !simple, enable_color, + dup_to_stdout: !atty::is(atty::Stream::Stderr) && atty::is(atty::Stream::Stdout), }; let builder = FmtSubscriber::builder().with_env_filter(env_filter); diff --git a/client/transaction-pool/src/testing/pool.rs 
b/client/transaction-pool/src/testing/pool.rs index a41632ed8de88..063947b383d03 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -985,7 +985,7 @@ fn import_notification_to_pool_maintain_works() { let mut block_builder = client.new_block(Default::default()).unwrap(); block_builder.push(xt).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Get the notification of the block import and maintain the pool with it, // Now, the pool should not contain any transactions. diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index 6262ed9086a5e..c0f43f01f4130 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -35,12 +35,21 @@ A PR needs to be reviewed and approved by project maintainers unless: *Process:* -. Please tag each PR with exactly one `A`, `B` and `C` label at the minimum. +. Please tag each PR with exactly one `A`, `B`, `C` and `D` label at the minimum. . Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`A0-pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. . If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. 
Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge. -. PRs must be tagged with respect to _release notes_ with https://github.com/paritytech/substrate/labels/B0-silent[`B0-silent`] and `B1-..`. The former indicates that no changes should be mentioned in any release notes. The latter indicates that the changes should be reported in the corresponding release note -. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/D2-breaksapi[`D2-breaksapi`], when it changes the FRAME or consensus of running system with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`B3-breaksconsensus`]. -. PRs should be labeled with their release importance via the `C1-C9`. +. PRs must be tagged with their release notes requirements via the `B1-B9` labels. +. PRs must be tagged with their release importance via the `C1-C9` labels. +. PRs must be tagged with their audit requirements via the `D1-D9` labels. +. PRs that must be backported to a stable branch must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E0-patchthis`]. +. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. +. PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E2-databasemigration[`E2-databasemigration`]. +. PRs that add host functions must be tagged with with https://github.com/paritytech/substrate/labels/E4-newhostfunctions[`E4-newhostfunctions`]. +. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/E5-breaksapi[`E5-breaksapi`]. +. PRs that materially change the FRAME/runtime semantics must be tagged with https://github.com/paritytech/substrate/labels/E6-transactionversion[`E6-transactionversion`]. +. 
PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with https://github.com/paritytech/substrate/labels/E7-breaksauthoring[`E7-breaksauthoring`]. +. PRs that "break everything" must be tagged with https://github.com/paritytech/substrate/labels/E8-breakseverything[`E8-breakseverything`]. +. PRs that block a new release must be tagged with https://github.com/paritytech/substrate/labels/E9-blocker%20%E2%9B%94%EF%B8%8F[`E9-blocker`]. . PRs should be categorized into projects. . No PR should be merged until all reviews' comments are addressed and CI is successful. diff --git a/docs/node-template-release.md b/docs/node-template-release.md new file mode 100644 index 0000000000000..25834ae99f438 --- /dev/null +++ b/docs/node-template-release.md @@ -0,0 +1,78 @@ +# Substrate Node Template Release Process + +1. This release process has to be run in a github checkout Substrate directory with your work +committed into `https://github.com/paritytech/substrate/`, because the build script will check +the existence of your current git commit ID in the remote repository. + + Assume you are in root directory of Substrate. Run: + + ```bash + cd .maintain/ + ./node-template-release.sh + ``` + +2. Expand the output tar gzipped file and replace files in current Substrate Node Template +by running the following command. + + ```bash + # This is where the tar.gz file uncompressed + cd substrate-node-template + # rsync with force copying. Note the slash at the destination directory is important + rsync -avh * / + # For dry-running add `-n` argument + # rsync -avhn * / + ``` + + The above command only copies existing files from the source to the destination, but does not + delete files/directories that are removed from the source. So you need to manually check and + remove them in the destination. + +3. 
There are actually three packages in the Node Template, `node-template` (the node), +`node-template-runtime` (the runtime), and `pallet-template`, and each has its own `Cargo.toml`. +Inside these three files, dependencies are listed in expanded form and linked to a certain git +commit in Substrate remote repository, such as: + + ```toml + [dev-dependencies.sp-core] + default-features = false + git = 'https://github.com/paritytech/substrate.git' + rev = 'c1fe59d060600a10eebb4ace277af1fee20bad17' + version = '3.0.0' + ``` + + We will update each of them to the shortened form and link them to the Rust + [crate registry](https://crates.io/). After confirming the versioned package is published in + the crate, the above will become: + + ```toml + [dev-dependencies] + sp-core = { version = '3.0.0', default-features = false } + ``` + + P.S: This step can be automated if we update `node-template-release` package in + `.maintain/node-template-release`. + +4. Once the three `Cargo.toml`s are updated, compile and confirm that the Node Template builds. Then +commit the changes to a new branch in [Substrate Node Template](https://github.com/substrate-developer-hub/substrate-node-template), and make a PR. + + > Note that there is a chance the code in Substrate Node Template works with the linked Substrate git + commit but not with published packages due to the latest (as yet) unpublished features. In this case, + rollback that section of the Node Template to its previous version to ensure the Node Template builds. + +5. Once the PR is merged, tag the merged commit in master branch with the version number +`vX.Y.Z+A` (e.g. `v3.0.0+1`). The `X`(major), `Y`(minor), and `Z`(patch) version number should +follow Substrate release version. The last digit is any significant fixes made in the Substrate +Node Template apart from Substrate. When the Substrate version is updated, this digit is reset to 0. 
+ +## Troubleshooting + +- Running the script `./node-template-release.sh `, after all tests passed + successfully, seeing the following error message: + + ``` + thread 'main' panicked at 'Creates output file: Os { code: 2, kind: NotFound, message: "No such file or directory" }', src/main.rs:250:10 +note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace + ``` + + This is likely because your output path is not a valid `tar.gz` filename or you don't have write + permission to the destination. Try with a simple output path such as `~/node-tpl.tar.gz`. diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 42f876ff7f3de..227d45623d688 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_benchmarking::{ use frame_support::traits::Get; use frame_support::{traits::EnsureOrigin, dispatch::UnfilteredDispatchable}; -use crate::Module as Assets; +use crate::Pallet as Assets; const SEED: u32 = 0; @@ -120,13 +120,21 @@ fn add_approvals(minter: T::AccountId, n: u32) { } fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); + let events = frame_system::Pallet::::events(); let system_event: ::Event = generic_event.into(); // compare to the last event record let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } +fn assert_event(generic_event: ::Event) { + let system_event: ::Event = generic_event.into(); + let events = frame_system::Pallet::::events(); + assert!(events.iter().any(|event_record| { + matches!(&event_record, frame_system::EventRecord { event, .. } if &system_event == event) + })); +} + benchmarks! { create { let caller: T::AccountId = whitelisted_caller(); @@ -193,7 +201,7 @@ benchmarks! 
{ let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) verify { - assert!(frame_system::Module::::account_exists(&caller)); + assert!(frame_system::Pallet::::account_exists(&caller)); assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); } @@ -383,7 +391,8 @@ benchmarks! { let dest_lookup = T::Lookup::unlookup(dest.clone()); }: _(SystemOrigin::Signed(delegate.clone()), id, owner_lookup, dest_lookup, amount) verify { - assert_last_event::(Event::TransferredApproved(id, owner, delegate, dest, amount).into()); + assert!(T::Currency::reserved_balance(&owner).is_zero()); + assert_event::(Event::Transferred(id, owner, dest, amount).into()); } cancel_approval { diff --git a/frame/assets/src/extra_mutator.rs b/frame/assets/src/extra_mutator.rs new file mode 100644 index 0000000000000..26a9a3f357c52 --- /dev/null +++ b/frame/assets/src/extra_mutator.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Datatype for easy mutation of the extra "sidecar" data. + +use super::*; + +/// A mutator type allowing inspection and possible modification of the extra "sidecar" data. +/// +/// This may be used as a `Deref` for the pallet's extra data. 
If mutated (using `DerefMut`), then +/// any uncommitted changes (see `commit` function) will be automatically committed to storage when +/// dropped. Changes, even after committed, may be reverted to their original values with the +/// `revert` function. +pub struct ExtraMutator { + id: T::AssetId, + who: T::AccountId, + original: T::Extra, + pending: Option, +} + +impl Drop for ExtraMutator { + fn drop(&mut self) { + debug_assert!(self.commit().is_ok(), "attempt to write to non-existent asset account"); + } +} + +impl sp_std::ops::Deref for ExtraMutator { + type Target = T::Extra; + fn deref(&self) -> &T::Extra { + match self.pending { + Some(ref value) => value, + None => &self.original, + } + } +} + +impl sp_std::ops::DerefMut for ExtraMutator { + fn deref_mut(&mut self) -> &mut T::Extra { + if self.pending.is_none() { + self.pending = Some(self.original.clone()); + } + self.pending.as_mut().unwrap() + } +} + +impl ExtraMutator { + pub(super) fn maybe_new(id: T::AssetId, who: impl sp_std::borrow::Borrow) + -> Option> + { + if Account::::contains_key(id, who.borrow()) { + Some(ExtraMutator:: { + id, + who: who.borrow().clone(), + original: Account::::get(id, who.borrow()).extra, + pending: None, + }) + } else { + None + } + } + + + /// Commit any changes to storage. + pub fn commit(&mut self) -> Result<(), ()> { + if let Some(extra) = self.pending.take() { + Account::::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| + if let Some(ref mut account) = maybe_account { + account.extra = extra; + Ok(()) + } else { + Err(()) + } + ) + } else { + Ok(()) + } + } + + /// Revert any changes, even those already committed by `self` and drop self. 
+ pub fn revert(mut self) -> Result<(), ()> { + self.pending = None; + Account::::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| + if let Some(ref mut account) = maybe_account { + account.extra = self.original.clone(); + Ok(()) + } else { + Err(()) + } + ) + } +} diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs new file mode 100644 index 0000000000000..197b010b6eb87 --- /dev/null +++ b/frame/assets/src/functions.rs @@ -0,0 +1,469 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Functions for the Assets pallet. + +use super::*; + +// The main implementation block for the module. +impl Pallet { + // Public immutables + + /// Return the extra "sidecar" data for `id`/`who`, or `None` if the account doesn't exist. + pub fn adjust_extra(id: T::AssetId, who: impl sp_std::borrow::Borrow) + -> Option> + { + ExtraMutator::maybe_new(id, who) + } + + /// Get the asset `id` balance of `who`. + pub fn balance(id: T::AssetId, who: impl sp_std::borrow::Borrow) -> T::Balance { + Account::::get(id, who.borrow()).balance + } + + /// Get the total supply of an asset `id`. 
+ pub fn total_supply(id: T::AssetId) -> T::Balance { + Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + pub(super) fn new_account( + who: &T::AccountId, + d: &mut AssetDetails>, + ) -> Result { + let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; + let is_sufficient = if d.is_sufficient { + frame_system::Pallet::::inc_sufficients(who); + d.sufficients += 1; + true + } else { + frame_system::Pallet::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; + false + }; + d.accounts = accounts; + Ok(is_sufficient) + } + + pub(super) fn dead_account( + what: T::AssetId, + who: &T::AccountId, + d: &mut AssetDetails>, + sufficient: bool, + ) { + if sufficient { + d.sufficients = d.sufficients.saturating_sub(1); + frame_system::Pallet::::dec_sufficients(who); + } else { + frame_system::Pallet::::dec_consumers(who); + } + d.accounts = d.accounts.saturating_sub(1); + T::Freezer::died(what, who) + } + + pub(super) fn can_increase(id: T::AssetId, who: &T::AccountId, amount: T::Balance) -> DepositConsequence { + let details = match Asset::::get(id) { + Some(details) => details, + None => return DepositConsequence::UnknownAsset, + }; + if details.supply.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + let account = Account::::get(id, who); + if account.balance.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + if account.balance.is_zero() { + if amount < details.min_balance { + return DepositConsequence::BelowMinimum + } + if !details.is_sufficient && frame_system::Pallet::::providers(who) == 0 { + return DepositConsequence::CannotCreate + } + if details.is_sufficient && details.sufficients.checked_add(1).is_none() { + return DepositConsequence::Overflow + } + } + + DepositConsequence::Success + } + + /// Return the consequence of a withdraw. 
+ pub(super) fn can_decrease( + id: T::AssetId, + who: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> WithdrawConsequence { + use WithdrawConsequence::*; + let details = match Asset::::get(id) { + Some(details) => details, + None => return UnknownAsset, + }; + if details.supply.checked_sub(&amount).is_none() { + return Underflow + } + if details.is_frozen { + return Frozen + } + let account = Account::::get(id, who); + if account.is_frozen { + return Frozen + } + if let Some(rest) = account.balance.checked_sub(&amount) { + if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + match frozen.checked_add(&details.min_balance) { + Some(required) if rest < required => return Frozen, + None => return Overflow, + _ => {} + } + } + + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + let must_keep_alive = keep_alive || is_required; + + if rest < details.min_balance { + if must_keep_alive { + WouldDie + } else { + ReducedToZero(rest) + } + } else { + Success + } + } else { + NoFunds + } + } + + // Maximum `amount` that can be passed into `can_withdraw` to result in a `WithdrawConsequence` + // of `Success`. 
+ pub(super) fn reducible_balance( + id: T::AssetId, + who: &T::AccountId, + keep_alive: bool, + ) -> Result> { + let details = match Asset::::get(id) { + Some(details) => details, + None => return Err(Error::::Unknown), + }; + ensure!(!details.is_frozen, Error::::Frozen); + + let account = Account::::get(id, who); + ensure!(!account.is_frozen, Error::::Frozen); + + let amount = if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + // Frozen balance: account CANNOT be deleted + let required = frozen.checked_add(&details.min_balance).ok_or(Error::::Overflow)?; + account.balance.saturating_sub(required) + } else { + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + if keep_alive || is_required { + // We want to keep the account around. + account.balance.saturating_sub(details.min_balance) + } else { + // Don't care if the account dies + account.balance + } + }; + Ok(amount.min(details.supply)) + } + + /// Make preparatory checks for debiting some funds from an account. Flags indicate requirements + /// of the debit. + /// + /// - `amount`: The amount desired to be debited. The actual amount returned for debit may be + /// less (in the case of `best_effort` being `true`) or greater by up to the minimum balance + /// less one. + /// - `keep_alive`: Require that `target` must stay alive. + /// - `respect_freezer`: Respect any freezes on the account or token (or not). + /// - `best_effort`: The debit amount may be less than `amount`. + /// + /// On success, the amount which should be debited (this will always be at least `amount` unless + /// `best_effort` is `true`) together with an optional value indicating the argument which must + /// be passed into the `melted` function of the `T::Freezer` if `Some`. + /// + /// If no valid debit can be made then return an `Err`. 
+ pub(super) fn prep_debit( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + ) -> Result { + let actual = Self::reducible_balance(id, target, f.keep_alive)? + .min(amount); + ensure!(f.best_effort || actual >= amount, Error::::BalanceLow); + + let conseq = Self::can_decrease(id, target, actual, f.keep_alive); + let actual = match conseq.into_result() { + Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance + Err(e) => { + debug_assert!(false, "passed from reducible_balance; qed"); + return Err(e.into()) + } + }; + + Ok(actual) + } + + /// Make preparatory checks for crediting some funds from an account. Flags indicate + /// requirements of the credit. + /// + /// - `amount`: The amount desired to be credited. + /// - `debit`: The amount by which some other account has been debited. If this is greater than + /// `amount`, then the `burn_dust` parameter takes effect. + /// - `burn_dust`: Indicates that in the case of debit being greater than amount, the additional + /// (dust) value should be burned, rather than credited. + /// + /// On success, the amount which should be credited (this will always be at least `amount`) + /// together with an optional value indicating the value which should be burned. The latter + /// will always be `None` as long as `burn_dust` is `false` or `debit` is no greater than + /// `amount`. + /// + /// If no valid credit can be made then return an `Err`. + pub(super) fn prep_credit( + id: T::AssetId, + dest: &T::AccountId, + amount: T::Balance, + debit: T::Balance, + burn_dust: bool, + ) -> Result<(T::Balance, Option), DispatchError> { + let (credit, maybe_burn) = match (burn_dust, debit.checked_sub(&amount)) { + (true, Some(dust)) => (amount, Some(dust)), + _ => (debit, None), + }; + Self::can_increase(id, &dest, credit).into_result()?; + Ok((credit, maybe_burn)) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. 
+ /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error or will increase the amount by exactly `amount`. + pub(super) fn do_mint( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + maybe_check_issuer: Option, + ) -> DispatchResult { + Self::increase_balance(id, beneficiary, amount, |details| -> DispatchResult { + if let Some(check_issuer) = maybe_check_issuer { + ensure!(&check_issuer == &details.issuer, Error::::NoPermission); + } + debug_assert!(T::Balance::max_value() - details.supply >= amount, "checked in prep; qed"); + details.supply = details.supply.saturating_add(amount); + Ok(()) + })?; + Self::deposit_event(Event::Issued(id, beneficiary.clone(), amount)); + Ok(()) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_mint` if you need + /// that. This is not intended to be used alone. + /// + /// Will return an error or will increase the amount by exactly `amount`. + pub(super) fn increase_balance( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + check: impl FnOnce(&mut AssetDetails>) -> DispatchResult, + ) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + + Self::can_increase(id, beneficiary, amount).into_result()?; + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(details)?; + + Account::::try_mutate(id, beneficiary, |t| -> DispatchResult { + let new_balance = t.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, TokenError::BelowMinimum); + if t.balance.is_zero() { + t.sufficient = Self::new_account(beneficiary, details)?; + } + t.balance = new_balance; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + } + + /// Reduces asset `id` balance of `target` by `amount`. 
Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. + pub(super) fn do_burn( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + maybe_check_admin: Option, + f: DebitFlags, + ) -> Result { + let actual = Self::decrease_balance(id, target, amount, f, |actual, details| { + // Check admin rights. + if let Some(check_admin) = maybe_check_admin { + ensure!(&check_admin == &details.admin, Error::::NoPermission); + } + + debug_assert!(details.supply >= actual, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(actual); + + Ok(()) + })?; + Self::deposit_event(Event::Burned(id, target.clone(), actual)); + Ok(actual) + } + + /// Reduces asset `id` balance of `target` by `amount`. Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_burn` if you need + /// that. This is not intended to be used alone. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. 
+ pub(super) fn decrease_balance( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + check: impl FnOnce( + T::Balance, + &mut AssetDetails>, + ) -> DispatchResult, + ) -> Result { + if amount.is_zero() { return Ok(amount) } + + let actual = Self::prep_debit(id, target, amount, f)?; + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(actual, details)?; + + Account::::try_mutate_exists(id, target, |maybe_account| -> DispatchResult { + let mut account = maybe_account.take().unwrap_or_default(); + debug_assert!(account.balance >= actual, "checked in prep; qed"); + + // Make the debit. + account.balance = account.balance.saturating_sub(actual); + *maybe_account = if account.balance < details.min_balance { + debug_assert!(account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, target, details, account.sufficient); + None + } else { + Some(account) + }; + Ok(()) + })?; + + Ok(()) + })?; + + Ok(actual) + } + + /// Reduces the asset `id` balance of `source` by some `amount` and increases the balance of + /// `dest` by (similar) amount. + /// + /// Returns the actual amount placed into `dest`. Exact semantics are determined by the flags + /// `f`. + /// + /// Will fail if the amount transferred is so small that it cannot create the destination due + /// to minimum balance requirements. + pub(super) fn do_transfer( + id: T::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + maybe_need_admin: Option, + f: TransferFlags, + ) -> Result { + // Early exist if no-op. + if amount.is_zero() { + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), amount)); + return Ok(amount) + } + + // Figure out the debit and credit, together with side-effects. 
+ let debit = Self::prep_debit(id, &source, amount, f.into())?; + let (credit, maybe_burn) = Self::prep_credit(id, &dest, amount, debit, f.burn_dust)?; + + let mut source_account = Account::::get(id, &source); + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + // Check admin rights. + if let Some(need_admin) = maybe_need_admin { + ensure!(&need_admin == &details.admin, Error::::NoPermission); + } + + // Skip if source == dest + if source == dest { + return Ok(()) + } + + // Burn any dust if needed. + if let Some(burn) = maybe_burn { + // Debit dust from supply; this will not saturate since it's already checked in prep. + debug_assert!(details.supply >= burn, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(burn); + } + + // Debit balance from source; this will not saturate since it's already checked in prep. + debug_assert!(source_account.balance >= debit, "checked in prep; qed"); + source_account.balance = source_account.balance.saturating_sub(debit); + + Account::::try_mutate(id, &dest, |a| -> DispatchResult { + // Calculate new balance; this will not saturate since it's already checked in prep. + debug_assert!(a.balance.checked_add(&credit).is_some(), "checked in prep; qed"); + let new_balance = a.balance.saturating_add(credit); + + // Create a new account if there wasn't one already. + if a.balance.is_zero() { + a.sufficient = Self::new_account(&dest, details)?; + } + + a.balance = new_balance; + Ok(()) + })?; + + // Remove source account if it's now dead. 
+ if source_account.balance < details.min_balance { + debug_assert!(source_account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, &source, details, source_account.sufficient); + Account::::remove(id, &source); + } else { + Account::::insert(id, &source, &source_account) + } + + Ok(()) + })?; + + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); + Ok(credit) + } +} diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs new file mode 100644 index 0000000000000..a4cff9b7e9a62 --- /dev/null +++ b/frame/assets/src/impl_fungibles.rs @@ -0,0 +1,153 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for fungibles trait. 
+ +use super::*; + +impl fungibles::Inspect<::AccountId> for Pallet { + type AssetId = T::AssetId; + type Balance = T::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset).map(|x| x.min_balance).unwrap_or_else(Zero::zero) + } + + fn balance( + asset: Self::AssetId, + who: &::AccountId, + ) -> Self::Balance { + Pallet::::balance(asset, who) + } + + fn reducible_balance( + asset: Self::AssetId, + who: &::AccountId, + keep_alive: bool, + ) -> Self::Balance { + Pallet::::reducible_balance(asset, who, keep_alive).unwrap_or(Zero::zero()) + } + + fn can_deposit( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DepositConsequence { + Pallet::::can_increase(asset, who, amount) + } + + fn can_withdraw( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + Pallet::::can_decrease(asset, who, amount, false) + } +} + +impl fungibles::Mutate<::AccountId> for Pallet { + fn mint_into( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DispatchResult { + Self::do_mint(asset, who, amount, None) + } + + fn burn_from( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { + keep_alive: false, + best_effort: false, + }; + Self::do_burn(asset, who, amount, None, f) + } + + fn slash( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { + keep_alive: false, + best_effort: true, + }; + Self::do_burn(asset, who, amount, None, f) + } +} + +impl fungibles::Transfer for Pallet { + fn transfer( + asset: Self::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let f = TransferFlags { + keep_alive, + best_effort: false, + burn_dust: false + }; + 
Self::do_transfer(asset, source, dest, amount, None, f) + } +} + +impl fungibles::Unbalanced for Pallet { + fn set_balance(_: Self::AssetId, _: &T::AccountId, _: Self::Balance) -> DispatchResult { + unreachable!("set_balance is not used if other functions are impl'd"); + } + fn set_total_issuance(id: T::AssetId, amount: Self::Balance) { + Asset::::mutate_exists(id, |maybe_asset| if let Some(ref mut asset) = maybe_asset { + asset.supply = amount + }); + } + fn decrease_balance(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Result + { + let f = DebitFlags { keep_alive: false, best_effort: false }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) + } + fn decrease_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Self::Balance + { + let f = DebitFlags { keep_alive: false, best_effort: true }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) + .unwrap_or(Zero::zero()) + } + fn increase_balance(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Result + { + Self::increase_balance(asset, who, amount, |_| Ok(()))?; + Ok(amount) + } + fn increase_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Self::Balance + { + match Self::increase_balance(asset, who, amount, |_| Ok(())) { + Ok(()) => amount, + Err(_) => Zero::zero(), + } + } +} diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs new file mode 100644 index 0000000000000..a8a6f95557dfb --- /dev/null +++ b/frame/assets/src/impl_stored_map.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet's `StoredMap` implementation. + +use super::*; + +impl StoredMap<(T::AssetId, T::AccountId), T::Extra> for Pallet { + fn get(id_who: &(T::AssetId, T::AccountId)) -> T::Extra { + let &(id, ref who) = id_who; + if Account::::contains_key(id, who) { + Account::::get(id, who).extra + } else { + Default::default() + } + } + + fn try_mutate_exists>( + id_who: &(T::AssetId, T::AccountId), + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + let &(id, ref who) = id_who; + let mut maybe_extra = Some(Account::::get(id, who).extra); + let r = f(&mut maybe_extra)?; + // They want to write some value or delete it. + // If the account existed and they want to write a value, then we write. + // If the account didn't exist and they want to delete it, then we let it pass. + // Otherwise, we fail. + Account::::try_mutate_exists(id, who, |maybe_account| { + if let Some(extra) = maybe_extra { + // They want to write a value. Let this happen only if the account actually exists. + if let Some(ref mut account) = maybe_account { + account.extra = extra; + } else { + Err(StoredMapError::NoProviders)?; + } + } else { + // They want to delete it. Let this pass if the item never existed anyway. 
+ ensure!(maybe_account.is_none(), StoredMapError::ConsumerRemaining); + } + Ok(r) + }) + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e5cb39db2b8e5..2a162c2c936b1 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -130,127 +130,30 @@ pub mod mock; #[cfg(test)] mod tests; -use sp_std::prelude::*; +mod extra_mutator; +pub use extra_mutator::*; +mod impl_stored_map; +mod impl_fungibles; +mod functions; +mod types; +pub use types::*; + +use sp_std::{prelude::*, borrow::Borrow}; use sp_runtime::{ - RuntimeDebug, - traits::{ - AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, + RuntimeDebug, TokenError, traits::{ + AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, Bounded, + StoredMapError, } }; use codec::{Encode, Decode, HasCompact}; -use frame_support::{ - ensure, - traits::{Currency, ReservableCurrency, BalanceStatus::Reserved}, - dispatch::{DispatchError, DispatchResult}, -}; -pub use weights::WeightInfo; +use frame_support::{ensure, dispatch::{DispatchError, DispatchResult}}; +use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved, StoredMap}; +use frame_support::traits::tokens::{WithdrawConsequence, DepositConsequence, fungibles}; +use frame_system::Config as SystemConfig; +pub use weights::WeightInfo; pub use pallet::*; -type DepositBalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct AssetDetails< - Balance, - AccountId, - DepositBalance, -> { - /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. - owner: AccountId, - /// Can mint tokens. - issuer: AccountId, - /// Can thaw tokens, force transfers and burn tokens from any account. - admin: AccountId, - /// Can freeze tokens. - freezer: AccountId, - /// The total supply across all accounts. - supply: Balance, - /// The balance deposited for this asset. This pays for the data stored here. 
- deposit: DepositBalance, - /// The ED for virtual accounts. - min_balance: Balance, - /// If `true`, then any account with this asset is given a provider reference. Otherwise, it - /// requires a consumer reference. - is_sufficient: bool, - /// The total number of accounts. - accounts: u32, - /// The total number of accounts for which we have placed a self-sufficient reference. - sufficients: u32, - /// The total number of approvals. - approvals: u32, - /// Whether the asset is frozen for non-admin transfers. - is_frozen: bool, -} - -impl AssetDetails { - pub fn destroy_witness(&self) -> DestroyWitness { - DestroyWitness { - accounts: self.accounts, - sufficients: self.sufficients, - approvals: self.approvals, - } - } -} - -/// A pair to act as a key for the approval storage map. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct ApprovalKey { - /// The owner of the funds that are being approved. - owner: AccountId, - /// The party to whom transfer of the funds is being delegated. - delegate: AccountId, -} - -/// Data concerning an approval. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct Approval { - /// The amount of funds approved for the balance transfer from the owner to some delegated - /// target. - amount: Balance, - /// The amount reserved on the owner's account to hold this item in storage. - deposit: DepositBalance, -} - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct AssetBalance { - /// The balance. - balance: Balance, - /// Whether the account is frozen. - is_frozen: bool, - /// `true` if this balance gave the account a self-sufficient reference. - sufficient: bool, -} - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct AssetMetadata { - /// The balance deposited for this metadata. - /// - /// This pays for the data stored in this struct. - deposit: DepositBalance, - /// The user friendly name of this asset. 
Limited in length by `StringLimit`. - name: Vec, - /// The ticker symbol for this asset. Limited in length by `StringLimit`. - symbol: Vec, - /// The number of decimals this asset uses to represent one unit. - decimals: u8, - /// Whether the asset metadata may be changed by a non Force origin. - is_frozen: bool, -} - -/// Witness data for the destroy transactions. -#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct DestroyWitness { - /// The number of accounts holding the asset. - #[codec(compact)] - accounts: u32, - /// The number of accounts holding the asset with a self-sufficient reference. - #[codec(compact)] - sufficients: u32, - /// The number of transfer-approvals of the asset. - #[codec(compact)] - approvals: u32, -} - #[frame_support::pallet] pub mod pallet { use frame_support::{ @@ -273,7 +176,7 @@ pub mod pallet { /// The units in which we record balances. type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; - /// The arithmetic type of asset identifier. + /// Identifier for the class of asset. type AssetId: Member + Parameter + Default + Copy + HasCompact; /// The currency mechanism. @@ -299,6 +202,13 @@ pub mod pallet { /// The maximum length of a name or symbol stored on-chain. type StringLimit: Get; + /// A hook to allow a per-asset, per-account minimum balance to be enforced. This must be + /// respected in all permissionless operations. + type Freezer: FrozenBalance; + + /// Additional data to be stored with an account's asset balance. + type Extra: Member + Parameter + Default; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -320,7 +230,7 @@ pub mod pallet { T::AssetId, Blake2_128Concat, T::AccountId, - AssetBalance, + AssetBalance, ValueQuery, >; @@ -435,8 +345,7 @@ pub mod pallet { /// /// The origin must be Signed and the sender must have sufficient funds free. 
/// - /// Funds of sender are reserved according to the formula: - /// `AssetDepositBase + AssetDepositPerZombie * max_zombies`. + /// Funds of sender are reserved by `AssetDeposit`. /// /// Parameters: /// - `id`: The identifier of the new asset. This must not be currently in use to identify @@ -574,7 +483,7 @@ pub mod pallet { ensure!(details.approvals == witness.approvals, Error::::BadWitness); for (who, v) in Account::::drain_prefix(id) { - Self::dead_account(&who, &mut details, v.sufficient); + Self::dead_account(id, &who, &mut details, v.sufficient); } debug_assert_eq!(details.accounts, 0); debug_assert_eq!(details.sufficients, 0); @@ -611,25 +520,9 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; - - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - - ensure!(&origin == &details.issuer, Error::::NoPermission); - details.supply = details.supply.checked_add(&amount).ok_or(Error::::Overflow)?; - - Account::::try_mutate(id, &beneficiary, |t| -> DispatchResult { - let new_balance = t.balance.saturating_add(amount); - ensure!(new_balance >= details.min_balance, Error::::BalanceLow); - if t.balance.is_zero() { - t.sufficient = Self::new_account(&beneficiary, details)?; - } - t.balance = new_balance; - Ok(()) - })?; - Self::deposit_event(Event::Issued(id, beneficiary, amount)); - Ok(()) - }) + Self::do_mint(id, &beneficiary, amount, Some(origin))?; + Self::deposit_event(Event::Issued(id, beneficiary, amount)); + Ok(()) } /// Reduce the balance of `who` by as much as possible up to `amount` assets of `id`. 
@@ -657,33 +550,10 @@ pub mod pallet { let origin = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; - Asset::::try_mutate(id, |maybe_details| { - let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(&origin == &d.admin, Error::::NoPermission); - - let burned = Account::::try_mutate_exists( - id, - &who, - |maybe_account| -> Result { - let mut account = maybe_account.take().ok_or(Error::::BalanceZero)?; - let mut burned = amount.min(account.balance); - account.balance -= burned; - *maybe_account = if account.balance < d.min_balance { - burned += account.balance; - Self::dead_account(&who, d, account.sufficient); - None - } else { - Some(account) - }; - Ok(burned) - } - )?; - - d.supply = d.supply.saturating_sub(burned); - - Self::deposit_event(Event::Burned(id, who, burned)); - Ok(()) - }) + let f = DebitFlags { keep_alive: false, best_effort: true }; + let burned = Self::do_burn(id, &who, amount, Some(origin), f)?; + Self::deposit_event(Event::Burned(id, who, burned)); + Ok(()) } /// Move some assets from the sender account to another. @@ -714,9 +584,12 @@ pub mod pallet { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(target)?; - Self::do_transfer(id, &origin, &dest, amount, None, false)?; - Self::deposit_event(Event::Transferred(id, origin, dest, amount)); - Ok(()) + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &origin, &dest, amount, None, f).map(|_| ()) } /// Move some assets from the sender account to another, keeping the sender account alive. 
@@ -744,12 +617,15 @@ pub mod pallet { target: ::Source, #[pallet::compact] amount: T::Balance ) -> DispatchResult { - let origin = ensure_signed(origin)?; + let source = ensure_signed(origin)?; let dest = T::Lookup::lookup(target)?; - Self::do_transfer(id, &origin, &dest, amount, None, true)?; - Self::deposit_event(Event::Transferred(id, origin, dest, amount)); - Ok(()) + let f = TransferFlags { + keep_alive: true, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &source, &dest, amount, None, f).map(|_| ()) } /// Move some assets from one account to another. @@ -783,9 +659,12 @@ pub mod pallet { let source = T::Lookup::lookup(source)?; let dest = T::Lookup::lookup(dest)?; - Self::do_transfer(id, &source, &dest, amount, Some(origin), false)?; - Self::deposit_event(Event::Transferred(id, source, dest, amount)); - Ok(()) + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &source, &dest, amount, Some(origin), f).map(|_| ()) } /// Disallow further unprivileged transfers from an account. @@ -1338,7 +1217,12 @@ pub mod pallet { let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; let remaining = approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; - Self::do_transfer(id, &key.owner, &destination, amount, None, false)?; + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &key.owner, &destination, amount, None, f)?; if remaining.is_zero() { T::Currency::unreserve(&key.owner, approved.deposit); @@ -1348,113 +1232,7 @@ pub mod pallet { } Ok(()) })?; - let event = Event::TransferredApproved(id, key.owner, key.delegate, destination, amount); - Self::deposit_event(event); Ok(()) } } } - -// The main implementation block for the module. -impl Pallet { - // Public immutables - - /// Get the asset `id` balance of `who`. 
- pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { - Account::::get(id, who).balance - } - - /// Get the total supply of an asset `id`. - pub fn total_supply(id: T::AssetId) -> T::Balance { - Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) - } - - fn new_account( - who: &T::AccountId, - d: &mut AssetDetails>, - ) -> Result { - let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; - let is_sufficient = if d.is_sufficient { - frame_system::Module::::inc_sufficients(who); - d.sufficients += 1; - true - } else { - frame_system::Module::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; - false - }; - d.accounts = accounts; - Ok(is_sufficient) - } - - fn dead_account( - who: &T::AccountId, - d: &mut AssetDetails>, - sufficient: bool, - ) { - if sufficient { - d.sufficients = d.sufficients.saturating_sub(1); - frame_system::Module::::dec_sufficients(who); - } else { - frame_system::Module::::dec_consumers(who); - } - d.accounts = d.accounts.saturating_sub(1); - } - - fn do_transfer( - id: T::AssetId, - source: &T::AccountId, - dest: &T::AccountId, - amount: T::Balance, - maybe_need_admin: Option, - keep_alive: bool, - ) -> DispatchResult { - let mut source_account = Account::::get(id, source); - ensure!(!source_account.is_frozen, Error::::Frozen); - - source_account.balance = source_account.balance.checked_sub(&amount) - .ok_or(Error::::BalanceLow)?; - - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(!details.is_frozen, Error::::Frozen); - - if let Some(need_admin) = maybe_need_admin { - ensure!(&need_admin == &details.admin, Error::::NoPermission); - } - - if dest == source || amount.is_zero() { - return Ok(()) - } - - let mut amount = amount; - if source_account.balance < details.min_balance { - ensure!(!keep_alive, Error::::WouldDie); - amount += source_account.balance; - source_account.balance = Zero::zero(); - } - - Account::::try_mutate(id, dest, 
|a| -> DispatchResult { - let new_balance = a.balance.saturating_add(amount); - - // This is impossible since `new_balance > amount > min_balance`, but we can - // handle it, so we do. - ensure!(new_balance >= details.min_balance, Error::::BalanceLow); - - if a.balance.is_zero() { - a.sufficient = Self::new_account(dest, details)?; - } - a.balance = new_balance; - Ok(()) - })?; - - if source_account.balance.is_zero() { - Self::dead_account(source, details, source_account.sufficient); - Account::::remove(id, source); - } else { - Account::::insert(id, source, &source_account) - } - - Ok(()) - }) - } -} diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 434a7ccce0757..26ff938512a2f 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -33,9 +33,9 @@ construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Assets: pallet_assets::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Event}, } ); @@ -100,7 +100,42 @@ impl Config for Test { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = StringLimit; + type Freezer = TestFreezer; type WeightInfo = (); + type Extra = (); +} + +use std::cell::RefCell; +use std::collections::HashMap; + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum Hook { + Died(u32, u64), +} +thread_local! 
{ + static FROZEN: RefCell> = RefCell::new(Default::default()); + static HOOKS: RefCell> = RefCell::new(Default::default()); +} + +pub struct TestFreezer; +impl FrozenBalance for TestFreezer { + fn frozen_balance(asset: u32, who: &u64) -> Option { + FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned()) + } + + fn died(asset: u32, who: &u64) { + HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone()))); + } +} + +pub(crate) fn set_frozen_balance(asset: u32, who: u64, amount: u64) { + FROZEN.with(|f| f.borrow_mut().insert((asset, who), amount)); +} +pub(crate) fn clear_frozen_balance(asset: u32, who: u64) { + FROZEN.with(|f| f.borrow_mut().remove(&(asset, who))); +} +pub(crate) fn hooks() -> Vec { + HOOKS.with(|h| h.borrow().clone()) } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 89173b64d5898..953164a0b9380 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -19,11 +19,12 @@ use super::*; use crate::{Error, mock::*}; +use sp_runtime::TokenError; use frame_support::{assert_ok, assert_noop, traits::Currency}; use pallet_balances::Error as BalancesError; fn last_event() -> mock::Event { - frame_system::Module::::events().pop().expect("Event expected").event + frame_system::Pallet::::events().pop().expect("Event expected").event } #[test] @@ -198,11 +199,11 @@ fn non_providing_should_work() { assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); // Cannot mint into account 2 since it doesn't (yet) exist... - assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), Error::::NoProvider); + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); // ...or transfer... 
- assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), Error::::NoProvider); + assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate); // ...or force-transfer - assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), Error::::NoProvider); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); @@ -219,12 +220,11 @@ fn min_balance_should_work() { assert_eq!(Asset::::get(0).unwrap().accounts, 1); // Cannot create a new account with a balance that is below minimum... - assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); - assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), TokenError::BelowMinimum); // When deducting from an account to below minimum, it should be reaped. 
- assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); assert!(Assets::balance(0, 1).is_zero()); assert_eq!(Assets::balance(0, 2), 100); @@ -277,7 +277,7 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::::WouldDie); + assert_noop!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::::BalanceLow); assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90)); assert_eq!(Assets::balance(0, 1), 10); assert_eq!(Assets::balance(0, 2), 90); @@ -430,12 +430,14 @@ fn burning_asset_balance_with_positive_balance_should_work() { } #[test] -fn burning_asset_balance_with_zero_balance_should_not_work() { +fn burning_asset_balance_with_zero_balance_does_nothing() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value()), Error::::BalanceZero); + assert_ok!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value())); + assert_eq!(Assets::balance(0, 2), 0); + assert_eq!(Assets::total_supply(0), 100); }); } @@ -491,3 +493,66 @@ fn set_metadata_should_work() { } // TODO: tests for force_set_metadata, force_clear_metadata, force_asset_status +// https://github.com/paritytech/substrate/issues/8470 + +#[test] +fn freezer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + + // freeze 50 of it. 
+ set_frozen_balance(0, 1, 50); + + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 20)); + // cannot transfer another 21 away as this would take the non-frozen balance (30) to below + // the minimum balance (10). + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 21), Error::::BalanceLow); + + // create an approved transfer... + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + let e = Error::::BalanceLow; + // ...but that wont work either: + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 2, 21), e); + // a force transfer won't work also. + let e = Error::::BalanceLow; + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21), e); + + // reduce it to only 49 frozen... + set_frozen_balance(0, 1, 49); + // ...and it's all good: + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21)); + + // and if we clear it, we can remove the account completely. + clear_frozen_balance(0, 1); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(hooks(), vec![Hook::Died(0, 1)]); + }); +} + +#[test] +fn imbalances_should_work() { + use frame_support::traits::tokens::fungibles::Balanced; + + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + + let imb = Assets::issue(0, 100); + assert_eq!(Assets::total_supply(0), 100); + assert_eq!(imb.peek(), 100); + + let (imb1, imb2) = imb.split(30); + assert_eq!(imb1.peek(), 30); + assert_eq!(imb2.peek(), 70); + + drop(imb2); + assert_eq!(Assets::total_supply(0), 30); + + assert!(Assets::resolve(&1, imb1).is_ok()); + assert_eq!(Assets::balance(0, 1), 30); + assert_eq!(Assets::total_supply(0), 30); + }); +} diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs new file mode 100644 index 0000000000000..7e0e235b1b7e6 --- /dev/null +++ b/frame/assets/src/types.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. 
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Various basic types for use in the assets pallet.
+
+use super::*;
+
+pub(super) type DepositBalanceOf = <::Currency as Currency<::AccountId>>::Balance;
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)]
+pub struct AssetDetails<
+ Balance,
+ AccountId,
+ DepositBalance,
+> {
+ /// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
+ pub(super) owner: AccountId,
+ /// Can mint tokens.
+ pub(super) issuer: AccountId,
+ /// Can thaw tokens, force transfers and burn tokens from any account.
+ pub(super) admin: AccountId,
+ /// Can freeze tokens.
+ pub(super) freezer: AccountId,
+ /// The total supply across all accounts.
+ pub(super) supply: Balance,
+ /// The balance deposited for this asset. This pays for the data stored here.
+ pub(super) deposit: DepositBalance,
+ /// The ED for virtual accounts.
+ pub(super) min_balance: Balance,
+ /// If `true`, then any account with this asset is given a provider reference. Otherwise, it
+ /// requires a consumer reference.
+ pub(super) is_sufficient: bool,
+ /// The total number of accounts.
+ pub(super) accounts: u32,
+ /// The total number of accounts for which we have placed a self-sufficient reference.
+ pub(super) sufficients: u32,
+ /// The total number of approvals. 
+ pub(super) approvals: u32, + /// Whether the asset is frozen for non-admin transfers. + pub(super) is_frozen: bool, +} + +impl AssetDetails { + pub fn destroy_witness(&self) -> DestroyWitness { + DestroyWitness { + accounts: self.accounts, + sufficients: self.sufficients, + approvals: self.approvals, + } + } +} + +/// A pair to act as a key for the approval storage map. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct ApprovalKey { + /// The owner of the funds that are being approved. + pub(super) owner: AccountId, + /// The party to whom transfer of the funds is being delegated. + pub(super) delegate: AccountId, +} + +/// Data concerning an approval. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct Approval { + /// The amount of funds approved for the balance transfer from the owner to some delegated + /// target. + pub(super) amount: Balance, + /// The amount reserved on the owner's account to hold this item in storage. + pub(super) deposit: DepositBalance, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetBalance { + /// The balance. + pub(super) balance: Balance, + /// Whether the account is frozen. + pub(super) is_frozen: bool, + /// `true` if this balance gave the account a self-sufficient reference. + pub(super) sufficient: bool, + /// Additional "sidecar" data, in case some other pallet wants to use this storage item. + pub(super) extra: Extra, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetMetadata { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// The user friendly name of this asset. Limited in length by `StringLimit`. + pub(super) name: Vec, + /// The ticker symbol for this asset. Limited in length by `StringLimit`. 
+ pub(super) symbol: Vec,
+ /// The number of decimals this asset uses to represent one unit.
+ pub(super) decimals: u8,
+ /// Whether the asset metadata may be changed by a non Force origin.
+ pub(super) is_frozen: bool,
+}
+
+/// Witness data for the destroy transactions.
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)]
+pub struct DestroyWitness {
+ /// The number of accounts holding the asset.
+ #[codec(compact)]
+ pub(super) accounts: u32,
+ /// The number of accounts holding the asset with a self-sufficient reference.
+ #[codec(compact)]
+ pub(super) sufficients: u32,
+ /// The number of transfer-approvals of the asset.
+ #[codec(compact)]
+ pub(super) approvals: u32,
+}
+
+/// Trait for allowing a minimum balance on the account to be specified, beyond the
+/// `minimum_balance` of the asset. This is additive - the `minimum_balance` of the asset must be
+/// met *and then* anything here in addition.
+pub trait FrozenBalance {
+ /// Return the frozen balance. Under normal behaviour, this amount should always be
+ /// withdrawable.
+ ///
+ /// In reality, the balance of every account must be at least the sum of this (if `Some`) and
+ /// the asset's minimum_balance, since there may be complications to destroying an asset's
+ /// account completely.
+ ///
+ /// If `None` is returned, then nothing special is enforced.
+ ///
+ /// If any operation ever breaks this requirement (which will only happen through some sort of
+ /// privileged intervention), then `died` is called to do any cleanup.
+ fn frozen_balance(asset: AssetId, who: &AccountId) -> Option;
+
+ /// Called when an account has been removed. 
+ fn died(asset: AssetId, who: &AccountId);
+}
+
+impl FrozenBalance for () {
+ fn frozen_balance(_: AssetId, _: &AccountId) -> Option { None }
+ fn died(_: AssetId, _: &AccountId) {}
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub(super) struct TransferFlags {
+ /// The debited account must stay alive at the end of the operation; an error is returned if
+ /// this cannot be achieved legally.
+ pub(super) keep_alive: bool,
+ /// Less than the amount specified needs to be debited by the operation for it to be considered
+ /// successful. If `false`, then the amount debited will always be at least the amount
+ /// specified.
+ pub(super) best_effort: bool,
+ /// Any additional funds debited (due to minimum balance requirements) should be burned rather
+ /// than credited to the destination account.
+ pub(super) burn_dust: bool,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub(super) struct DebitFlags {
+ /// The debited account must stay alive at the end of the operation; an error is returned if
+ /// this cannot be achieved legally.
+ pub(super) keep_alive: bool,
+ /// Less than the amount specified needs to be debited by the operation for it to be considered
+ /// successful. If `false`, then the amount debited will always be at least the amount
+ /// specified.
+ pub(super) best_effort: bool,
+}
+
+impl From for DebitFlags {
+ fn from(f: TransferFlags) -> Self {
+ Self {
+ keep_alive: f.keep_alive,
+ best_effort: f.best_effort,
+ }
+ }
+}
diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs
index e6d44d73c40d2..513a9343a72e1 100644
--- a/frame/atomic-swap/src/lib.rs
+++ b/frame/atomic-swap/src/lib.rs
@@ -17,15 +17,15 @@
 //! # Atomic Swap
 //!
-//! A module for atomically sending funds.
+//! A pallet for atomically sending funds.
 //!
-//! - [`atomic_swap::Config`](./trait.Config.html)
-//! - [`Call`](./enum.Call.html)
-//! - [`Module`](./struct.Module.html)
+//! - [`Config`]
+//! - [`Call`]
+//! - [`Pallet`]
 //!
 //! ## Overview
 //!
-//! 
A module for atomically sending funds from an origin to a target. A proof +//! A pallet for atomically sending funds from an origin to a target. A proof //! is used to allow the target to approve (claim) the swap. If the swap is not //! claimed within a specified duration of time, the sender may cancel it. //! @@ -33,9 +33,9 @@ //! //! ### Dispatchable Functions //! -//! * `create_swap` - called by a sender to register a new atomic swap -//! * `claim_swap` - called by the target to approve a swap -//! * `cancel_swap` - may be called by a sender after a specified duration +//! * [`create_swap`](Call::create_swap) - called by a sender to register a new atomic swap +//! * [`claim_swap`](Call::claim_swap) - called by the target to approve a swap +//! * [`cancel_swap`](Call::cancel_swap) - may be called by a sender after a specified duration // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -45,17 +45,16 @@ mod tests; use sp_std::{prelude::*, marker::PhantomData, ops::{Deref, DerefMut}}; use sp_io::hashing::blake2_256; use frame_support::{ - Parameter, decl_module, decl_storage, decl_event, decl_error, ensure, + RuntimeDebugNoBound, traits::{Get, Currency, ReservableCurrency, BalanceStatus}, weights::Weight, dispatch::DispatchResult, }; -use frame_system::{self as system, ensure_signed}; use codec::{Encode, Decode}; use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. -#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode)] pub struct PendingSwap { /// Source of the swap. pub source: T::AccountId, @@ -135,35 +134,50 @@ impl SwapAction for BalanceSwapAction> + Into<::Event>; - /// Swap action. - type SwapAction: SwapAction + Parameter; - /// Limit of proof size. - /// - /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs - /// on-chain. 
If A is the one that generates the proof, then it requires that either: - /// - A's blockchain has the same proof length limit as B's blockchain. - /// - Or A's blockchain has shorter proof length limit as B's blockchain. - /// - /// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse - /// to accept the atomic swap request if A generates the proof, and asks that B generates the - /// proof instead. - type ProofLimit: Get; -} - -decl_storage! { - trait Store for Module as AtomicSwap { - pub PendingSwaps: double_map - hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof - => Option>; +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + /// Atomic swap's pallet configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// Swap action. + type SwapAction: SwapAction + Parameter; + /// Limit of proof size. + /// + /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs + /// on-chain. If A is the one that generates the proof, then it requires that either: + /// - A's blockchain has the same proof length limit as B's blockchain. + /// - Or A's blockchain has shorter proof length limit as B's blockchain. + /// + /// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse + /// to accept the atomic swap request if A generates the proof, and asks that B generates the + /// proof instead. + type ProofLimit: Get; } -} -decl_error! 
{ - pub enum Error for Module { + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::storage] + pub type PendingSwaps = StorageDoubleMap<_, + Twox64Concat, T::AccountId, + Blake2_128Concat, HashedProof, + PendingSwap, + >; + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::error] + pub enum Error { /// Swap already exists. AlreadyExist, /// Swap proof is invalid. @@ -181,31 +195,27 @@ decl_error! { /// Duration has not yet passed for the swap to be cancelled. DurationNotPassed, } -} -decl_event!( /// Event of atomic swap pallet. - pub enum Event where - AccountId = ::AccountId, - PendingSwap = PendingSwap, - { + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId", PendingSwap = "PendingSwap")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// Swap created. \[account, proof, swap\] - NewSwap(AccountId, HashedProof, PendingSwap), - /// Swap claimed. The last parameter indicates whether the execution succeeds. + NewSwap(T::AccountId, HashedProof, PendingSwap), + /// Swap claimed. The last parameter indicates whether the execution succeeds. /// \[account, proof, success\] - SwapClaimed(AccountId, HashedProof, bool), + SwapClaimed(T::AccountId, HashedProof, bool), /// Swap cancelled. \[account, proof\] - SwapCancelled(AccountId, HashedProof), + SwapCancelled(T::AccountId, HashedProof), } -); - -decl_module! { - /// Module definition of atomic swap pallet. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; + /// Old name generated by `decl_event`. + #[deprecated(note="use `Event` instead")] + pub type RawEvent = Event; + #[pallet::call] + impl Pallet { /// Register a new atomic swap, declaring an intention to send funds from origin to target /// on the current blockchain. The target can claim the fund using the revealed proof. 
If /// the fund is not claimed after `duration` blocks, then the sender can cancel the swap. @@ -218,14 +228,14 @@ decl_module! { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. - #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn create_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub(crate) fn create_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, action: T::SwapAction, duration: T::BlockNumber, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; ensure!( !PendingSwaps::::contains_key(&target, hashed_proof), @@ -237,13 +247,15 @@ decl_module! { let swap = PendingSwap { source, action, - end_block: frame_system::Module::::block_number() + duration, + end_block: frame_system::Pallet::::block_number() + duration, }; PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); Self::deposit_event( - RawEvent::NewSwap(target, hashed_proof, swap) + Event::NewSwap(target, hashed_proof, swap) ); + + Ok(()) } /// Claim an atomic swap. @@ -253,13 +265,14 @@ decl_module! { /// - `proof`: Revealed proof of the claim. /// - `action`: Action defined in the swap, it must match the entry in blockchain. Otherwise /// the operation fails. This is used for weight calculation. 
- #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((proof.len() as Weight).saturating_mul(100)) - .saturating_add(action.weight()) - ] - fn claim_swap( - origin, + #[pallet::weight( + T::DbWeight::get().reads_writes(1, 1) + .saturating_add(40_000_000) + .saturating_add((proof.len() as Weight).saturating_mul(100)) + .saturating_add(action.weight()) + )] + pub(crate) fn claim_swap( + origin: OriginFor, proof: Vec, action: T::SwapAction, ) -> DispatchResult { @@ -280,7 +293,7 @@ decl_module! { PendingSwaps::::remove(target.clone(), hashed_proof.clone()); Self::deposit_event( - RawEvent::SwapClaimed(target, hashed_proof, succeeded) + Event::SwapClaimed(target, hashed_proof, succeeded) ); Ok(()) @@ -292,12 +305,12 @@ decl_module! { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. - #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn cancel_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub(crate) fn cancel_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; let swap = PendingSwaps::::get(&target, hashed_proof) @@ -307,7 +320,7 @@ decl_module! { Error::::SourceMismatch, ); ensure!( - frame_system::Module::::block_number() >= swap.end_block, + frame_system::Pallet::::block_number() >= swap.end_block, Error::::DurationNotPassed, ); @@ -315,8 +328,10 @@ decl_module! 
{ PendingSwaps::::remove(&target, hashed_proof.clone()); Self::deposit_event( - RawEvent::SwapCancelled(target, hashed_proof) + Event::SwapCancelled(target, hashed_proof) ); + + Ok(()) } } } diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 977b17f8710e3..baa9a08957d4a 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -19,9 +19,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - AtomicSwap: pallet_atomic_swap::{Module, Call, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + AtomicSwap: pallet_atomic_swap::{Pallet, Call, Event}, } ); diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index 17484461cdeff..a9b91737235ae 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -17,8 +17,8 @@ //! # Aura Module //! -//! - [`aura::Config`](./trait.Config.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Pallet`] //! //! ## Overview //! 
@@ -130,7 +130,7 @@ impl Pallet { AURA_ENGINE_ID, ConsensusLog::AuthoritiesChange(new).encode() ); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } fn initialize_authorities(authorities: &[T::AuthorityId]) { @@ -194,7 +194,7 @@ impl OneSessionHandler for Pallet { ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), ); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } } diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index a5ef12f5935f1..481edbaff487f 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -34,9 +34,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Aura: pallet_aura::{Module, Call, Storage, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Aura: pallet_aura::{Pallet, Call, Storage, Config}, } ); diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index cc3f41f59ed89..ca8f3eeff3d68 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -136,9 +136,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Call, Config}, } ); diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 3d89ab24d01cf..286abc721cbba 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -234,7 +234,7 @@ impl Module { return 
author; } - let digest = >::digest(); + let digest = >::digest(); let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); if let Some(author) = T::FindAuthor::find_author(pre_runtime_digests) { ::Author::put(&author); @@ -245,7 +245,7 @@ impl Module { } fn verify_and_import_uncles(new_uncles: Vec) -> dispatch::DispatchResult { - let now = >::block_number(); + let now = >::block_number(); let mut uncles = ::Uncles::get(); uncles.push(UncleEntryItem::InclusionHeight(now)); @@ -278,7 +278,7 @@ impl Module { accumulator: &mut >::Accumulator, ) -> Result, dispatch::DispatchError> { - let now = >::block_number(); + let now = >::block_number(); let (minimum_height, maximum_height) = { let uncle_generations = T::UncleGenerations::get(); @@ -303,7 +303,7 @@ impl Module { { let parent_number = uncle.number().clone() - One::one(); - let parent_hash = >::block_hash(&parent_number); + let parent_hash = >::block_hash(&parent_number); if &parent_hash != uncle.parent_hash() { return Err(Error::::InvalidUncleParent.into()); } @@ -314,7 +314,7 @@ impl Module { } let duplicate = existing_uncles.into_iter().find(|h| **h == hash).is_some(); - let in_chain = >::block_hash(uncle.number()) == hash; + let in_chain = >::block_hash(uncle.number()) == hash; if duplicate || in_chain { return Err(Error::::UncleAlreadyIncluded.into()) @@ -413,8 +413,8 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, } ); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 6c7bb508b53d1..f7bebce98acf3 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -37,7 +37,7 @@ pallet-offences = { version = "3.0.0", path = "../offences" } pallet-staking = { version = "3.0.0", path = 
"../staking" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } sp-core = { version = "3.0.0", path = "../../primitives/core" } -sp-election-providers = { version = "3.0.0", path = "../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", path = "../election-provider-support" } [features] default = ["std"] diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 087cac2ed6cc6..145a82c4f8049 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -91,7 +91,7 @@ mod tests { let equivocation_proof = generate_equivocation_proof( offending_authority_index, offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); println!("equivocation_proof: {:?}", equivocation_proof); diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 14ba0f16cb9e2..154faa49f0b26 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -48,7 +48,7 @@ use sp_staking::{ }; use sp_std::prelude::*; -use crate::{Call, Module, Config}; +use crate::{Call, Pallet, Config}; /// A trait with utility methods for handling equivocation reports in BABE. /// The trait provides methods for reporting an offence triggered by a valid @@ -182,7 +182,7 @@ where /// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` /// to local calls (i.e. extrinsics generated on this node) or that already in a block. This /// guarantees that only block authors can include unsigned equivocation reports. 
-impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Pallet { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { @@ -290,7 +290,7 @@ impl Offence fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 00bfa4f2656c9..fb1e32e5350b5 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -23,13 +23,10 @@ use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, traits::{FindAuthor, Get, KeyOwnerProofSystem, OneSessionHandler, OnTimestampSet}, weights::{Pays, Weight}, - Parameter, }; -use frame_system::{ensure_none, ensure_root, ensure_signed}; use sp_application_crypto::Public; use sp_runtime::{ generic::DigestItem, @@ -64,52 +61,7 @@ pub use randomness::{ CurrentBlockRandomness, RandomnessFromOneEpochAgo, RandomnessFromTwoEpochsAgo, }; -pub trait Config: pallet_timestamp::Config { - /// The amount of time, in slots, that each epoch should last. - /// NOTE: Currently it is not possible to change the epoch duration after - /// the chain has started. Attempting to do so will brick block production. - type EpochDuration: Get; - - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). 
- type ExpectedBlockTime: Get; - - /// BABE requires some logic to be triggered on every block to query for whether an epoch - /// has ended and to perform the transition to the next epoch. - /// - /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used - /// when no other module is responsible for changing authority set. - type EpochChangeTrigger: EpochChangeTrigger; - - /// The proof of key ownership, used for validating equivocation reports. - /// The proof must include the session index and validator count of the - /// session at which the equivocation occurred. - type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; - - /// The identification of a key owner, used when reporting equivocations. - type KeyOwnerIdentification: Parameter; - - /// A system for proving ownership of keys, i.e. that a given key was part - /// of a validator set, needed for validating equivocation reports. - type KeyOwnerProofSystem: KeyOwnerProofSystem< - (KeyTypeId, AuthorityId), - Proof = Self::KeyOwnerProof, - IdentificationTuple = Self::KeyOwnerIdentification, - >; - - /// The equivocation handling subsystem, defines methods to report an - /// offence (after the equivocation has been validated) and for submitting a - /// transaction to report an equivocation (from an offchain context). - /// NOTE: when enabling equivocation handling (i.e. this type isn't set to - /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime - /// definition. 
- type HandleEquivocation: HandleEquivocation; - - type WeightInfo: WeightInfo; -} +pub use pallet::*; pub trait WeightInfo { fn plan_config_change() -> Weight; @@ -137,11 +89,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); + if >::should_epoch_change(now) { + let authorities = >::authorities(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities); + >::enact_epoch_change(authorities, next_authorities); } } } @@ -150,8 +102,70 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeRandomness = Option; -decl_error! { - pub enum Error for Module { +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + /// The BABE Pallet + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_timestamp::Config { + /// The amount of time, in slots, that each epoch should last. + /// NOTE: Currently it is not possible to change the epoch duration after + /// the chain has started. Attempting to do so will brick block production. + #[pallet::constant] + type EpochDuration: Get; + + /// The expected average block time at which BABE should be creating + /// blocks. Since BABE is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + #[pallet::constant] + type ExpectedBlockTime: Get; + + /// BABE requires some logic to be triggered on every block to query for whether an epoch + /// has ended and to perform the transition to the next epoch. 
+ /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used + /// when no other module is responsible for changing authority set. + type EpochChangeTrigger: EpochChangeTrigger; + + /// The proof of key ownership, used for validating equivocation reports. + /// The proof must include the session index and validator count of the + /// session at which the equivocation occurred. + type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; + + /// The identification of a key owner, used when reporting equivocations. + type KeyOwnerIdentification: Parameter; + + /// A system for proving ownership of keys, i.e. that a given key was part + /// of a validator set, needed for validating equivocation reports. + type KeyOwnerProofSystem: KeyOwnerProofSystem< + (KeyTypeId, AuthorityId), + Proof = Self::KeyOwnerProof, + IdentificationTuple = Self::KeyOwnerIdentification, + >; + + /// The equivocation handling subsystem, defines methods to report an + /// offence (after the equivocation has been validated) and for submitting a + /// transaction to report an equivocation (from an offchain context). + /// NOTE: when enabling equivocation handling (i.e. this type isn't set to + /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime + /// definition. + type HandleEquivocation: HandleEquivocation; + + type WeightInfo: WeightInfo; + } + + #[pallet::error] + pub enum Error { /// An equivocation proof provided as part of an equivocation report is invalid. InvalidEquivocationProof, /// A key ownership proof provided as part of an equivocation report is invalid. @@ -159,150 +173,189 @@ decl_error! { /// A given equivocation report is valid but already previously reported. DuplicateOffenceReport, } -} -decl_storage! { - trait Store for Module as Babe { - /// Current epoch index. - pub EpochIndex get(fn epoch_index): u64; + /// Current epoch index. 
+ #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; - /// Current epoch authorities. - pub Authorities get(fn authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; + /// Current epoch authorities. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; - /// The slot at which the first epoch actually started. This is 0 - /// until the first block of the chain. - pub GenesisSlot get(fn genesis_slot): Slot; + /// The slot at which the first epoch actually started. This is 0 + /// until the first block of the chain. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; - /// Current slot number. - pub CurrentSlot get(fn current_slot): Slot; + /// Current slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// The epoch randomness for the *current* epoch. - /// - /// # Security - /// - /// This MUST NOT be used for gambling, as it can be influenced by a - /// malicious validator in the short term. It MAY be used in many - /// cryptographic protocols, however, so long as one remembers that this - /// (like everything else on-chain) it is public. For example, it can be - /// used where a number is needed that cannot have been chosen by an - /// adversary, for purposes such as public-coin zero-knowledge proofs. - // NOTE: the following fields don't use the constants to define the - // array size because the metadata API currently doesn't resolve the - // variable to its underlying value. - pub Randomness get(fn randomness): schnorrkel::Randomness; - - /// Pending epoch configuration change that will be applied when the next epoch is enacted. - PendingEpochConfigChange: Option; - - /// Next epoch randomness. 
- NextRandomness: schnorrkel::Randomness; - - /// Next epoch authorities. - NextAuthorities: Vec<(AuthorityId, BabeAuthorityWeight)>; - - /// Randomness under construction. - /// - /// We make a tradeoff between storage accesses and list length. - /// We store the under-construction randomness in segments of up to - /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. - /// - /// Once a segment reaches this length, we begin the next one. - /// We reset all segments and return to `0` at the beginning of every - /// epoch. - SegmentIndex build(|_| 0): u32; - - /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. - UnderConstruction: map hasher(twox_64_concat) u32 => Vec; - - /// Temporary value (cleared at block finalization) which is `Some` - /// if per-block initialization has already been called for current block. - Initialized get(fn initialized): Option; - - /// Temporary value (cleared at block finalization) that includes the VRF output generated - /// at this block. This field should always be populated during block processing unless - /// secondary plain slots are enabled (which don't contain a VRF output). - AuthorVrfRandomness get(fn author_vrf_randomness): MaybeRandomness; - - /// The block numbers when the last and current epoch have started, respectively `N-1` and - /// `N`. - /// NOTE: We track this is in order to annotate the block number when a given pool of - /// entropy was fixed (i.e. it was known to chain observers). Since epochs are defined in - /// slots, which may be skipped, the block numbers may not line up with the slot numbers. - EpochStart: (T::BlockNumber, T::BlockNumber); - - /// How late the current block is compared to its parent. - /// - /// This entry is populated as part of block execution and is cleaned up - /// on block finalization. Querying this storage entry outside of block - /// execution context should always yield zero. - Lateness get(fn lateness): T::BlockNumber; - - /// The configuration for the current epoch. 
Should never be `None` as it is initialized in genesis. - EpochConfig: Option; - - /// The configuration for the next epoch, `None` if the config will not change - /// (you can fallback to `EpochConfig` instead in that case). - NextEpochConfig: Option; - } - add_extra_genesis { - config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; - config(epoch_config): Option; - build(|config| { - Module::::initialize_authorities(&config.authorities); - EpochConfig::put(config.epoch_config.clone().expect("epoch_config must not be None")); - }) - } -} + /// The epoch randomness for the *current* epoch. + /// + /// # Security + /// + /// This MUST NOT be used for gambling, as it can be influenced by a + /// malicious validator in the short term. It MAY be used in many + /// cryptographic protocols, however, so long as one remembers that this + /// (like everything else on-chain) it is public. For example, it can be + /// used where a number is needed that cannot have been chosen by an + /// adversary, for purposes such as public-coin zero-knowledge proofs. + // NOTE: the following fields don't use the constants to define the + // array size because the metadata API currently doesn't resolve the + // variable to its underlying value. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Pending epoch configuration change that will be applied when the next epoch is enacted. + #[pallet::storage] + pub(super) type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; + + /// Next epoch randomness. + #[pallet::storage] + pub(super) type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Next epoch authorities. + #[pallet::storage] + pub(super) type NextAuthorities = StorageValue< + _, + Vec<(AuthorityId, BabeAuthorityWeight)>, + ValueQuery, + >; -decl_module! 
{ - /// The BABE Pallet - pub struct Module for enum Call where origin: T::Origin { - /// The number of **slots** that an epoch takes. We couple sessions to - /// epochs, i.e. we start a new session once the new epoch begins. - /// NOTE: Currently it is not possible to change the epoch duration - /// after the chain has started. Attempting to do so will brick block - /// production. - const EpochDuration: u64 = T::EpochDuration::get(); + /// Randomness under construction. + /// + /// We make a tradeoff between storage accesses and list length. + /// We store the under-construction randomness in segments of up to + /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. + /// + /// Once a segment reaches this length, we begin the next one. + /// We reset all segments and return to `0` at the beginning of every + /// epoch. + #[pallet::storage] + pub(super) type SegmentIndex = StorageValue<_, u32, ValueQuery>; + + /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. + #[pallet::storage] + pub(super) type UnderConstruction = StorageMap< + _, + Twox64Concat, + u32, + Vec, + ValueQuery, + >; - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). - const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + #[pallet::storage] + #[pallet::getter(fn initialized)] + pub(super) type Initialized = StorageValue<_, MaybeRandomness>; + + /// Temporary value (cleared at block finalization) that includes the VRF output generated + /// at this block. 
This field should always be populated during block processing unless + /// secondary plain slots are enabled (which don't contain a VRF output). + #[pallet::storage] + #[pallet::getter(fn author_vrf_randomness)] + pub(super) type AuthorVrfRandomness = StorageValue<_, MaybeRandomness, ValueQuery>; + + /// The block numbers when the last and current epoch have started, respectively `N-1` and + /// `N`. + /// NOTE: We track this is in order to annotate the block number when a given pool of + /// entropy was fixed (i.e. it was known to chain observers). Since epochs are defined in + /// slots, which may be skipped, the block numbers may not line up with the slot numbers. + #[pallet::storage] + pub(super) type EpochStart = StorageValue< + _, + (T::BlockNumber, T::BlockNumber), + ValueQuery, + >; + + /// How late the current block is compared to its parent. + /// + /// This entry is populated as part of block execution and is cleaned up + /// on block finalization. Querying this storage entry outside of block + /// execution context should always yield zero. + #[pallet::storage] + #[pallet::getter(fn lateness)] + pub(super) type Lateness = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// The configuration for the current epoch. Should never be `None` as it is initialized in genesis. + #[pallet::storage] + pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; + + /// The configuration for the next epoch, `None` if the config will not change + /// (you can fallback to `EpochConfig` instead in that case). 
+ #[pallet::storage] + pub(super) type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + pub epoch_config: Option, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + authorities: Default::default(), + epoch_config: Default::default(), + } + } + } + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + SegmentIndex::::put(0); + Pallet::::initialize_authorities(&self.authorities); + EpochConfig::::put(self.epoch_config.clone().expect("epoch_config must not be None")); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { /// Initialization - fn on_initialize(now: T::BlockNumber) -> Weight { + fn on_initialize(now: BlockNumberFor) -> Weight { Self::do_initialize(now); - 0 } /// Block finalization - fn on_finalize() { + fn on_finalize(_n: BlockNumberFor) { // at the end of the block, we can safely include the new VRF output // from this block into the under-construction randomness. If we've determined // that this block was the first in a new epoch, the changeover logic has // already occurred at this point, so the under-construction randomness // will only contain outputs from the right epoch. - if let Some(Some(randomness)) = Initialized::take() { + if let Some(Some(randomness)) = Initialized::::take() { Self::deposit_randomness(&randomness); } // The stored author generated VRF output is ephemeral. - AuthorVrfRandomness::kill(); + AuthorVrfRandomness::::kill(); // remove temporary "environment" entry from storage Lateness::::kill(); } + } + #[pallet::call] + impl Pallet { /// Report authority equivocation/misbehavior. This method will verify /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. 
- #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation( - origin, + #[pallet::weight(::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + ))] + pub fn report_equivocation( + origin: OriginFor, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { @@ -323,9 +376,11 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation_unsigned( - origin, + #[pallet::weight(::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + ))] + pub fn report_equivocation_unsigned( + origin: OriginFor, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { @@ -342,13 +397,14 @@ decl_module! { /// the next call to `enact_epoch_change`. The config will be activated one epoch after. /// Multiple calls to this method will replace any existing planned config change that had /// not been enacted yet. - #[weight = ::WeightInfo::plan_config_change()] - fn plan_config_change( - origin, + #[pallet::weight(::WeightInfo::plan_config_change())] + pub fn plan_config_change( + origin: OriginFor, config: NextConfigDescriptor, - ) { + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - PendingEpochConfigChange::put(config); + PendingEpochConfigChange::::put(config); + Ok(().into()) } } } @@ -356,7 +412,7 @@ decl_module! 
{ /// A BABE public key pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; -impl FindAuthor for Module { +impl FindAuthor for Pallet { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator { @@ -371,15 +427,15 @@ impl FindAuthor for Module { } } -impl IsMember for Module { +impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities() + >::authorities() .iter() .any(|id| &id.0 == authority_id) } } -impl pallet_session::ShouldEndSession for Module { +impl pallet_session::ShouldEndSession for Pallet { fn should_end_session(now: T::BlockNumber) -> bool { // it might be (and it is in current implementation) that session module is calling // should_end_session() from it's own on_initialize() handler @@ -391,7 +447,7 @@ impl pallet_session::ShouldEndSession for Module { } } -impl Module { +impl Pallet { /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within @@ -411,7 +467,7 @@ impl Module { // the same randomness and validator set as signalled in the genesis, // so we don't rotate the epoch. now != One::one() && { - let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); + let diff = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()); *diff >= T::EpochDuration::get() } } @@ -435,7 +491,7 @@ impl Module { pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); next_slot - .checked_sub(*CurrentSlot::get()) + .checked_sub(*CurrentSlot::::get()) .map(|slots_remaining| { // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. 
let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); @@ -457,12 +513,12 @@ impl Module { debug_assert!(Self::initialized().is_some()); // Update epoch index - let epoch_index = EpochIndex::get() + let epoch_index = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - EpochIndex::put(epoch_index); - Authorities::put(authorities); + EpochIndex::::put(epoch_index); + Authorities::::put(authorities); // Update epoch randomness. let next_epoch_index = epoch_index @@ -472,20 +528,20 @@ impl Module { // Returns randomness for the current epoch and computes the *next* // epoch randomness. let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::put(randomness); + Randomness::::put(randomness); // Update the next epoch authorities. - NextAuthorities::put(&next_authorities); + NextAuthorities::::put(&next_authorities); // Update the start blocks of the previous and new current epoch. >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); - *current_epoch_start_block = >::block_number(); + *current_epoch_start_block = >::block_number(); }); // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. 
- let next_randomness = NextRandomness::get(); + let next_randomness = NextRandomness::::get(); let next_epoch = NextEpochDescriptor { authorities: next_authorities, @@ -493,14 +549,14 @@ impl Module { }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - if let Some(next_config) = NextEpochConfig::get() { - EpochConfig::put(next_config); + if let Some(next_config) = NextEpochConfig::::get() { + EpochConfig::::put(next_config); } - if let Some(pending_epoch_config_change) = PendingEpochConfigChange::take() { + if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { let next_epoch_config: BabeEpochConfiguration = pending_epoch_config_change.clone().into(); - NextEpochConfig::put(next_epoch_config); + NextEpochConfig::::put(next_epoch_config); Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); } @@ -510,25 +566,25 @@ impl Module { /// give correct results after `do_initialize` of the first block /// in the chain (as its result is based off of `GenesisSlot`). pub fn current_epoch_start() -> Slot { - Self::epoch_start(EpochIndex::get()) + Self::epoch_start(EpochIndex::::get()) } /// Produces information about the current epoch. pub fn current_epoch() -> Epoch { Epoch { - epoch_index: EpochIndex::get(), + epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), authorities: Self::authorities(), randomness: Self::randomness(), - config: EpochConfig::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), + config: EpochConfig::::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), } } /// Produces information about the next epoch (which was already previously /// announced). 
pub fn next_epoch() -> Epoch { - let next_epoch_index = EpochIndex::get().checked_add(1).expect( + let next_epoch_index = EpochIndex::::get().checked_add(1).expect( "epoch index is u64; it is always only incremented by one; \ if u64 is not enough we should crash for safety; qed.", ); @@ -537,10 +593,10 @@ impl Module { epoch_index: next_epoch_index, start_slot: Self::epoch_start(next_epoch_index), duration: T::EpochDuration::get(), - authorities: NextAuthorities::get(), - randomness: NextRandomness::get(), - config: NextEpochConfig::get().unwrap_or_else(|| { - EpochConfig::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed") + authorities: NextAuthorities::::get(), + randomness: NextRandomness::::get(), + config: NextEpochConfig::::get().unwrap_or_else(|| { + EpochConfig::::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed") }), } } @@ -555,26 +611,26 @@ impl Module { .checked_mul(T::EpochDuration::get()) .expect(PROOF); - epoch_start.checked_add(*GenesisSlot::get()).expect(PROOF).into() + epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() } fn deposit_consensus(new: U) { let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); - >::deposit_log(log.into()) + >::deposit_log(log.into()) } fn deposit_randomness(randomness: &schnorrkel::Randomness) { - let segment_idx = ::get(); - let mut segment = ::get(&segment_idx); + let segment_idx = SegmentIndex::::get(); + let mut segment = UnderConstruction::::get(&segment_idx); if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { // push onto current segment: not full. segment.push(*randomness); - ::insert(&segment_idx, &segment); + UnderConstruction::::insert(&segment_idx, &segment); } else { // move onto the next segment and update the index. 
let segment_idx = segment_idx + 1; - ::insert(&segment_idx, &vec![randomness.clone()]); - ::put(&segment_idx); + UnderConstruction::::insert(&segment_idx, &vec![randomness.clone()]); + SegmentIndex::::put(&segment_idx); } } @@ -586,7 +642,7 @@ impl Module { return; } - let maybe_pre_digest: Option = >::digest() + let maybe_pre_digest: Option = >::digest() .logs .iter() .filter_map(|s| s.as_pre_runtime()) @@ -603,9 +659,9 @@ impl Module { // on the first non-zero block (i.e. block #1) // this is where the first epoch (epoch #0) actually starts. // we need to adjust internal storage accordingly. - if *GenesisSlot::get() == 0 { - GenesisSlot::put(digest.slot()); - debug_assert_ne!(*GenesisSlot::get(), 0); + if *GenesisSlot::::get() == 0 { + GenesisSlot::::put(digest.slot()); + debug_assert_ne!(*GenesisSlot::::get(), 0); // deposit a log because this is the first block in epoch #0 // we use the same values as genesis because we haven't collected any @@ -622,11 +678,11 @@ impl Module { let current_slot = digest.slot(); // how many slots were skipped between current and last block - let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); + let lateness = current_slot.saturating_sub(CurrentSlot::::get() + 1); let lateness = T::BlockNumber::from(*lateness as u32); Lateness::::put(lateness); - CurrentSlot::put(current_slot); + CurrentSlot::::put(current_slot); let authority_index = digest.authority_index(); @@ -635,7 +691,7 @@ impl Module { .vrf_output() .and_then(|vrf_output| { // Reconstruct the bytes of VRFInOut using the authority id. 
- Authorities::get() + Authorities::::get() .get(authority_index as usize) .and_then(|author| { schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok() @@ -644,7 +700,7 @@ impl Module { let transcript = sp_consensus_babe::make_transcript( &Self::randomness(), current_slot, - EpochIndex::get(), + EpochIndex::::get(), ); vrf_output.0.attach_input_hash( @@ -661,11 +717,11 @@ impl Module { // For primary VRF output we place it in the `Initialized` storage // item and it'll be put onto the under-construction randomness later, // once we've decided which epoch this block is in. - Initialized::put(if is_primary { maybe_randomness } else { None }); + Initialized::::put(if is_primary { maybe_randomness } else { None }); // Place either the primary or secondary VRF output into the // `AuthorVrfRandomness` storage item. - AuthorVrfRandomness::put(maybe_randomness); + AuthorVrfRandomness::::put(maybe_randomness); // enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now) @@ -674,8 +730,8 @@ impl Module { /// Call this function exactly once when an epoch changes, to update the /// randomness. Returns the new randomness. fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::get(); - let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); + let this_randomness = NextRandomness::::get(); + let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); // overestimate to the segment being full. 
let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; @@ -683,18 +739,18 @@ impl Module { let next_randomness = compute_randomness( this_randomness, next_epoch_index, - (0..segment_idx).flat_map(|i| ::take(&i)), + (0..segment_idx).flat_map(|i| UnderConstruction::::take(&i)), Some(rho_size), ); - NextRandomness::put(&next_randomness); + NextRandomness::::put(&next_randomness); this_randomness } fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { if !authorities.is_empty() { - assert!(Authorities::get().is_empty(), "Authorities are already initialized!"); - Authorities::put(authorities); - NextAuthorities::put(authorities); + assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); + Authorities::::put(authorities); + NextAuthorities::::put(authorities); } } @@ -714,7 +770,7 @@ impl Module { let validator_set_count = key_owner_proof.validator_count(); let session_index = key_owner_proof.session(); - let epoch_index = (*slot.saturating_sub(GenesisSlot::get()) / T::EpochDuration::get()) + let epoch_index = (*slot.saturating_sub(GenesisSlot::::get()) / T::EpochDuration::get()) .saturated_into::(); // check that the slot number is consistent with the session index @@ -763,7 +819,7 @@ impl Module { } } -impl OnTimestampSet for Module { +impl OnTimestampSet for Pallet { fn on_timestamp_set(moment: T::Moment) { let slot_duration = Self::slot_duration(); assert!(!slot_duration.is_zero(), "Babe slot duration cannot be zero."); @@ -771,20 +827,20 @@ impl OnTimestampSet for Module { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!(CurrentSlot::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`"); + assert!(CurrentSlot::::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`"); } } -impl frame_support::traits::EstimateNextSessionRotation for Module { +impl 
frame_support::traits::EstimateNextSessionRotation for Pallet { fn average_session_length() -> T::BlockNumber { T::EpochDuration::get().saturated_into() } fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { - let elapsed = CurrentSlot::get().saturating_sub(Self::current_epoch_start()) + 1; + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; ( - Some(Percent::from_rational_approximation( + Some(Percent::from_rational( *elapsed, T::EpochDuration::get(), )), @@ -802,17 +858,17 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness for Module { +impl frame_support::traits::Lateness for Pallet { fn lateness(&self) -> T::BlockNumber { Self::lateness() } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } -impl OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -890,7 +946,7 @@ pub mod migrations { let mut reads = 0; if let Some(pending_change) = OldNextEpochConfig::::get() { - PendingEpochConfigChange::put(pending_change); + PendingEpochConfigChange::::put(pending_change); writes += 1; } @@ -899,8 +955,8 @@ pub mod migrations { OldNextEpochConfig::::kill(); - EpochConfig::put(epoch_config.clone()); - NextEpochConfig::put(epoch_config); + EpochConfig::::put(epoch_config.clone()); + NextEpochConfig::::put(epoch_config); writes += 3; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index c46b55c2c4ace..137f32b5e502c 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -27,7 +27,7 @@ use sp_runtime::{ }; use frame_system::InitKind; use frame_support::{ - parameter_types, StorageValue, + parameter_types, traits::{KeyOwnerProofSystem, OnInitialize}, weights::Weight, }; @@ -37,7 +37,7 @@ use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; use 
sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_staking::SessionIndex; use pallet_staking::EraIndex; -use sp_election_providers::onchain; +use frame_election_provider_support::onchain; use pallet_session::historical as pallet_session_historical; type DummyValidatorId = u64; @@ -51,14 +51,14 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Historical: pallet_session_historical::{Module}, - Offences: pallet_offences::{Module, Call, Storage, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, ValidateUnsigned}, - Staking: pallet_staking::{Module, Call, Storage, Config, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Pallet}, + Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, + Staking: pallet_staking::{Pallet, Call, Storage, Config, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, } ); @@ -104,7 +104,7 @@ where impl_opaque_keys! { pub struct MockSessionKeys { - pub babe_authority: super::Module, + pub babe_authority: super::Pallet, } } @@ -187,11 +187,13 @@ parameter_types! 
{ impl onchain::Config for Test { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; + type BlockWeights = (); type Accuracy = Perbill; type DataProvider = Staking; } impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type Event = Event; @@ -203,16 +205,10 @@ impl pallet_staking::Config for Test { type SlashDeferDuration = SlashDeferDuration; type SlashCancelOrigin = frame_system::EnsureRoot; type SessionInterface = Self; - type UnixTime = pallet_timestamp::Module; - type RewardCurve = RewardCurve; + type UnixTime = pallet_timestamp::Pallet; + type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } @@ -450,7 +446,7 @@ pub fn generate_equivocation_proof( use sp_consensus_babe::digests::CompatibleDigestItem; let current_block = System::block_number(); - let current_slot = CurrentSlot::get(); + let current_slot = CurrentSlot::::get(); let make_header = || { let parent_hash = System::parent_hash(); diff --git a/frame/babe/src/randomness.rs b/frame/babe/src/randomness.rs index 71412a962becf..a7e8b31577681 100644 --- a/frame/babe/src/randomness.rs +++ b/frame/babe/src/randomness.rs @@ -21,7 +21,7 @@ use super::{ AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, VRF_OUTPUT_LENGTH, }; -use frame_support::{traits::Randomness as RandomnessT, StorageValue}; +use frame_support::{traits::Randomness as RandomnessT}; use sp_runtime::traits::Hash; /// Randomness usable by consensus protocols that **depend** upon finality and take action @@ 
-117,7 +117,7 @@ impl RandomnessT for RandomnessFromTwoEpochs fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { let mut subject = subject.to_vec(); subject.reserve(VRF_OUTPUT_LENGTH); - subject.extend_from_slice(&Randomness::get()[..]); + subject.extend_from_slice(&Randomness::::get()[..]); (T::Hashing::hash(&subject[..]), EpochStart::::get().0) } @@ -127,7 +127,7 @@ impl RandomnessT for RandomnessFromOneEpochA fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { let mut subject = subject.to_vec(); subject.reserve(VRF_OUTPUT_LENGTH); - subject.extend_from_slice(&NextRandomness::get()[..]); + subject.extend_from_slice(&NextRandomness::::get()[..]); (T::Hashing::hash(&subject[..]), EpochStart::::get().1) } @@ -135,7 +135,7 @@ impl RandomnessT for RandomnessFromOneEpochA impl RandomnessT, T::BlockNumber> for CurrentBlockRandomness { fn random(subject: &[u8]) -> (Option, T::BlockNumber) { - let random = AuthorVrfRandomness::get().map(|random| { + let random = AuthorVrfRandomness::::get().map(|random| { let mut subject = subject.to_vec(); subject.reserve(VRF_OUTPUT_LENGTH); subject.extend_from_slice(&random); @@ -143,6 +143,6 @@ impl RandomnessT, T::BlockNumber> for CurrentBlockRan T::Hashing::hash(&subject[..]) }); - (random, >::block_number()) + (random, >::block_number()) } } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 0ccc3db4df0b0..6aa80e9697339 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -19,7 +19,7 @@ use super::{Call, *}; use frame_support::{ - assert_err, assert_ok, + assert_err, assert_ok, assert_noop, traits::{Currency, EstimateNextSessionRotation, OnFinalize}, weights::{GetDispatchInfo, Pays}, }; @@ -92,11 +92,11 @@ fn first_block_epoch_zero_start() { Babe::on_finalize(1); let header = System::finalize(); - assert_eq!(SegmentIndex::get(), 0); - assert_eq!(UnderConstruction::get(0), vec![vrf_randomness]); + assert_eq!(SegmentIndex::::get(), 0); + assert_eq!(UnderConstruction::::get(0), 
vec![vrf_randomness]); assert_eq!(Babe::randomness(), [0; 32]); assert_eq!(Babe::author_vrf_randomness(), None); - assert_eq!(NextRandomness::get(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(header.digest.logs.len(), 2); assert_eq!(pre_digest.logs.len(), 1); @@ -278,10 +278,10 @@ fn can_enact_next_config() { allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, }; - EpochConfig::put(current_config); - NextEpochConfig::put(next_config.clone()); + EpochConfig::::put(current_config); + NextEpochConfig::::put(next_config.clone()); - assert_eq!(NextEpochConfig::get(), Some(next_config.clone())); + assert_eq!(NextEpochConfig::::get(), Some(next_config.clone())); Babe::plan_config_change( Origin::root(), @@ -295,8 +295,8 @@ fn can_enact_next_config() { Babe::on_finalize(9); let header = System::finalize(); - assert_eq!(EpochConfig::get(), Some(next_config)); - assert_eq!(NextEpochConfig::get(), Some(next_next_config.clone())); + assert_eq!(EpochConfig::::get(), Some(next_config)); + assert_eq!(NextEpochConfig::::get(), Some(next_next_config.clone())); let consensus_log = sp_consensus_babe::ConsensusLog::NextConfigData( NextConfigDescriptor::V1 { @@ -325,14 +325,14 @@ fn only_root_can_enact_config_change() { next_config.clone(), ); - assert_eq!(res, Err(DispatchError::BadOrigin)); + assert_noop!(res, DispatchError::BadOrigin); let res = Babe::plan_config_change( Origin::signed(1), next_config.clone(), ); - assert_eq!(res, Err(DispatchError::BadOrigin)); + assert_noop!(res, DispatchError::BadOrigin); let res = Babe::plan_config_change( Origin::root(), @@ -346,7 +346,7 @@ fn only_root_can_enact_config_change() { #[test] fn can_fetch_current_and_next_epoch_data() { new_test_ext(5).execute_with(|| { - EpochConfig::put(BabeEpochConfiguration { + EpochConfig::::put(BabeEpochConfiguration { c: (1, 4), allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, }); @@ -356,7 +356,6 @@ fn can_fetch_current_and_next_epoch_data() { 
Babe::current_epoch().authorities, Babe::next_epoch().authorities, ); - // 1 era = 3 epochs // 1 epoch = 3 slots // Eras start from 0. @@ -444,7 +443,7 @@ fn report_equivocation_current_session_works() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof @@ -518,7 +517,7 @@ fn report_equivocation_old_session_works() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof @@ -584,7 +583,7 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof @@ -664,7 +663,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.second_header = equivocation_proof.first_header.clone(); assert_invalid_equivocation(equivocation_proof); @@ -673,7 +672,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.first_header.digest_mut().logs.remove(0); assert_invalid_equivocation(equivocation_proof); @@ -682,7 +681,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.first_header.digest_mut().logs.remove(1); 
assert_invalid_equivocation(equivocation_proof); @@ -691,7 +690,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.slot = Slot::from(0); assert_invalid_equivocation(equivocation_proof.clone()); @@ -701,7 +700,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); // use the header from the previous equivocation generated @@ -714,7 +713,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); // replace the seal digest with the digest from the @@ -753,7 +752,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); let key = ( @@ -775,7 +774,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ); // the transaction is valid when passed as local - let tx_tag = (offending_authority_pair.public(), CurrentSlot::get()); + let tx_tag = (offending_authority_pair.public(), CurrentSlot::::get()); assert_eq!( ::validate_unsigned( TransactionSource::Local, @@ -848,7 +847,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // generate an equivocation proof. let equivocation_proof = - generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::get()); + generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::::get()); // create the key ownership proof. 
let key_owner_proof = Historical::prove(( @@ -941,7 +940,7 @@ fn add_epoch_configurations_migration_works() { &[], ).is_none()); - assert_eq!(EpochConfig::get(), Some(current_epoch)); - assert_eq!(PendingEpochConfigChange::get(), Some(next_config_descriptor)); + assert_eq!(EpochConfig::::get(), Some(current_epoch)); + assert_eq!(PendingEpochConfigChange::::get(), Some(next_config_descriptor)); }); } diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index c7cb67403d749..62959c4f1dc4a 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_system::RawOrigin; use frame_benchmarking::{benchmarks_instance_pallet, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Module as Balances; +use crate::Pallet as Balances; const SEED: u32 = 0; // existential deposit multiplier diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index cc7b6351c2584..35841c504adf9 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -39,7 +39,7 @@ //! ### Terminology //! //! - **Existential Deposit:** The minimum balance required to create or keep an account open. This prevents -//! "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) +//! "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) //! fall below this, then the account is said to be dead; and it loses its functionality as well as any //! prior history and all information on it is removed from the chain's state. //! 
No account should ever have a total balance that is strictly between 0 and the existential @@ -164,7 +164,8 @@ use frame_support::{ Currency, OnUnbalanced, TryDrop, StoredMap, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, BalanceStatus as Status, + ExistenceRequirement::AllowDeath, + tokens::{fungible, DepositConsequence, WithdrawConsequence, BalanceStatus as Status} } }; #[cfg(feature = "std")] @@ -624,7 +625,7 @@ pub struct DustCleaner, I: 'static = ()>(Option<(T::AccountId, Nega impl, I: 'static> Drop for DustCleaner { fn drop(&mut self) { if let Some((who, dust)) = self.0.take() { - Module::::deposit_event(Event::DustLost(who, dust.peek())); + Pallet::::deposit_event(Event::DustLost(who, dust.peek())); T::DustRemoval::on_unbalanced(dust); } } @@ -682,6 +683,78 @@ impl, I: 'static> Pallet { } } + fn deposit_consequence( + _who: &T::AccountId, + amount: T::Balance, + account: &AccountData, + ) -> DepositConsequence { + if amount.is_zero() { return DepositConsequence::Success } + + if TotalIssuance::::get().checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + + let new_total_balance = match account.total().checked_add(&amount) { + Some(x) => x, + None => return DepositConsequence::Overflow, + }; + + if new_total_balance < T::ExistentialDeposit::get() { + return DepositConsequence::BelowMinimum + } + + // NOTE: We assume that we are a provider, so don't need to do any checks in the + // case of account creation. 
+ + DepositConsequence::Success + } + + fn withdraw_consequence( + who: &T::AccountId, + amount: T::Balance, + account: &AccountData, + ) -> WithdrawConsequence { + if amount.is_zero() { return WithdrawConsequence::Success } + + if TotalIssuance::::get().checked_sub(&amount).is_none() { + return WithdrawConsequence::Underflow + } + + let new_total_balance = match account.total().checked_sub(&amount) { + Some(x) => x, + None => return WithdrawConsequence::NoFunds, + }; + + // Provider restriction - total account balance cannot be reduced to zero if it cannot + // sustain the loss of a provider reference. + // NOTE: This assumes that the pallet is a provider (which is true). Is this ever changes, + // then this will need to adapt accordingly. + let ed = T::ExistentialDeposit::get(); + let success = if new_total_balance < ed { + if frame_system::Pallet::::can_dec_provider(who) { + WithdrawConsequence::ReducedToZero(new_total_balance) + } else { + return WithdrawConsequence::WouldDie + } + } else { + WithdrawConsequence::Success + }; + + // Enough free funds to have them be reduced. + let new_free_balance = match account.free.checked_sub(&amount) { + Some(b) => b, + None => return WithdrawConsequence::NoFunds, + }; + + // Eventual free funds must be no less than the frozen balance. + let min_balance = account.frozen(Reasons::All); + if new_free_balance < min_balance { + return WithdrawConsequence::Frozen + } + + success + } + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce /// `ExistentialDeposit` law, annulling the account as needed. /// @@ -692,7 +765,7 @@ impl, I: 'static> Pallet { /// the caller will do this. pub fn mutate_account( who: &T::AccountId, - f: impl FnOnce(&mut AccountData) -> R + f: impl FnOnce(&mut AccountData) -> R, ) -> Result { Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } @@ -708,7 +781,7 @@ impl, I: 'static> Pallet { /// the caller will do this. 
fn try_mutate_account>( who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result + f: impl FnOnce(&mut AccountData, bool) -> Result, ) -> Result { Self::try_mutate_account_with_dust(who, f) .map(|(result, dust_cleaner)| { @@ -732,7 +805,7 @@ impl, I: 'static> Pallet { /// the caller will do this. fn try_mutate_account_with_dust>( who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result + f: impl FnOnce(&mut AccountData, bool) -> Result, ) -> Result<(R, DustCleaner), E> { let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { let is_new = maybe_account.is_none(); @@ -763,7 +836,7 @@ impl, I: 'static> Pallet { ); } // No way this can fail since we do not alter the existential balances. - let _ = Self::mutate_account(who, |b| { + let res = Self::mutate_account(who, |b| { b.misc_frozen = Zero::zero(); b.fee_frozen = Zero::zero(); for l in locks.iter() { @@ -775,6 +848,7 @@ impl, I: 'static> Pallet { } } }); + debug_assert!(res.is_ok()); let existed = Locks::::contains_key(who); if locks.is_empty() { @@ -800,6 +874,192 @@ impl, I: 'static> Pallet { } } } + + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
+ fn do_transfer_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: T::Balance, + best_effort: bool, + status: Status, + ) -> Result { + if value.is_zero() { return Ok(Zero::zero()) } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), + }; + } + + let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( + beneficiary, + |to_account, is_new| -> Result<(T::Balance, DustCleaner), DispatchError> { + ensure!(!is_new, Error::::DeadAccount); + Self::try_mutate_account_with_dust( + slashed, + |from_account, _| -> Result { + let actual = cmp::min(from_account.reserved, value); + ensure!(best_effort || actual == value, Error::::InsufficientBalance); + match status { + Status::Free => to_account.free = to_account.free + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + Status::Reserved => to_account.reserved = to_account.reserved + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + } + from_account.reserved -= actual; + Ok(actual) + } + ) + } + )?; + + Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); + Ok(actual) + } +} + +impl, I: 'static> fungible::Inspect for Pallet { + type Balance = T::Balance; + + fn total_issuance() -> Self::Balance { + TotalIssuance::::get() + } + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + fn balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + fn reducible_balance(who: &T::AccountId, keep_alive: bool) -> Self::Balance { + let a = Self::account(who); + // Liquid balance is what is neither reserved nor locked/frozen. 
+ let liquid = a.free.saturating_sub(a.fee_frozen.max(a.misc_frozen)); + if frame_system::Pallet::::can_dec_provider(who) && !keep_alive { + liquid + } else { + // `must_remain_to_exist` is the part of liquid balance which must remain to keep total over + // ED. + let must_remain_to_exist = T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); + liquid.saturating_sub(must_remain_to_exist) + } + } + fn can_deposit(who: &T::AccountId, amount: Self::Balance) -> DepositConsequence { + Self::deposit_consequence(who, amount, &Self::account(who)) + } + fn can_withdraw(who: &T::AccountId, amount: Self::Balance) -> WithdrawConsequence { + Self::withdraw_consequence(who, amount, &Self::account(who)) + } +} + +impl, I: 'static> fungible::Mutate for Pallet { + fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { + Self::deposit_consequence(who, amount, &account).into_result()?; + account.free += amount; + Ok(()) + })?; + TotalIssuance::::mutate(|t| *t += amount); + Ok(()) + } + + fn burn_from(who: &T::AccountId, amount: Self::Balance) -> Result { + if amount.is_zero() { return Ok(Self::Balance::zero()); } + let actual = Self::try_mutate_account(who, |account, _is_new| -> Result { + let extra = Self::withdraw_consequence(who, amount, &account).into_result()?; + let actual = amount + extra; + account.free -= actual; + Ok(actual) + })?; + TotalIssuance::::mutate(|t| *t -= actual); + Ok(actual) + } +} + +impl, I: 'static> fungible::Transfer for Pallet { + fn transfer( + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let er = if keep_alive { KeepAlive } else { AllowDeath }; + >::transfer(source, dest, amount, er) + .map(|_| amount) + } +} + +impl, I: 'static> fungible::Unbalanced for Pallet { + fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + 
Self::mutate_account(who, |account| account.free = amount)?; + Ok(()) + } + + fn set_total_issuance(amount: Self::Balance) { + TotalIssuance::::mutate(|t| *t = amount); + } +} + +impl, I: 'static> fungible::InspectHold for Pallet { + fn balance_on_hold(who: &T::AccountId) -> T::Balance { + Self::account(who).reserved + } + fn can_hold(who: &T::AccountId, amount: T::Balance) -> bool { + let a = Self::account(who); + let min_balance = T::ExistentialDeposit::get().max(a.frozen(Reasons::All)); + if a.reserved.checked_add(&amount).is_none() { return false } + // We require it to be min_balance + amount to ensure that the full reserved funds may be + // slashed without compromising locked funds or destroying the account. + let required_free = match min_balance.checked_add(&amount) { + Some(x) => x, + None => return false, + }; + a.free >= required_free + } +} +impl, I: 'static> fungible::MutateHold for Pallet { + fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); + Self::mutate_account(who, |a| { + a.free -= amount; + a.reserved += amount; + })?; + Ok(()) + } + fn release(who: &T::AccountId, amount: Self::Balance, best_effort: bool) + -> Result + { + if amount.is_zero() { return Ok(amount) } + // Done on a best-effort basis. 
+ Self::try_mutate_account(who, |a, _| { + let new_free = a.free.saturating_add(amount.min(a.reserved)); + let actual = new_free - a.free; + ensure!(best_effort || actual == amount, Error::::InsufficientBalance); + // ^^^ Guaranteed to be <= amount and <= a.reserved + a.free = new_free; + a.reserved = a.reserved.saturating_sub(actual.clone()); + Ok(actual) + }) + } + fn transfer_held( + source: &T::AccountId, + dest: &T::AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + let status = if on_hold { Status::Reserved } else { Status::Free }; + Self::do_transfer_reserved(source, dest, amount, best_effort, status) + } } // wrapping these imbalances in a private module is necessary to ensure absolute privacy @@ -810,6 +1070,7 @@ mod imbalances { TryDrop, RuntimeDebug, }; use sp_std::mem; + use frame_support::traits::SameOrOther; /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. 
@@ -843,6 +1104,12 @@ mod imbalances { } } + impl, I: 'static> Default for PositiveImbalance { + fn default() -> Self { + Self::zero() + } + } + impl, I: 'static> Imbalance for PositiveImbalance { type Opposite = NegativeImbalance; @@ -873,14 +1140,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(NegativeImbalance::new(b - a)) } else { - Err(NegativeImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -894,6 +1163,12 @@ mod imbalances { } } + impl, I: 'static> Default for NegativeImbalance { + fn default() -> Self { + Self::zero() + } + } + impl, I: 'static> Imbalance for NegativeImbalance { type Opposite = PositiveImbalance; @@ -924,14 +1199,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(PositiveImbalance::new(b - a)) } else { - Err(PositiveImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -1362,40 +1639,8 @@ impl, I: 'static> ReservableCurrency for Pallet value: Self::Balance, status: Status, ) -> Result { - if value.is_zero() { return Ok(Zero::zero()) } - - if slashed == beneficiary { - return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - }; - } - - let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( - 
beneficiary, - |to_account, is_new| -> Result<(Self::Balance, DustCleaner), DispatchError> { - ensure!(!is_new, Error::::DeadAccount); - Self::try_mutate_account_with_dust( - slashed, - |from_account, _| -> Result { - let actual = cmp::min(from_account.reserved, value); - match status { - Status::Free => to_account.free = to_account.free - .checked_add(&actual) - .ok_or(Error::::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved - .checked_add(&actual) - .ok_or(Error::::Overflow)?, - } - from_account.reserved -= actual; - Ok(actual) - } - ) - } - )?; - - Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); - Ok(value - actual) + let actual = Self::do_transfer_reserved(slashed, beneficiary, value, true, status)?; + Ok(value.saturating_sub(actual)) } } diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 776cda140efb8..3eb70e401e7f8 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -55,7 +55,7 @@ macro_rules! decl_tests { } fn last_event() -> Event { - system::Module::::events().pop().expect("Event expected").event + system::Pallet::::events().pop().expect("Event expected").event } #[test] @@ -684,7 +684,7 @@ macro_rules! decl_tests { let _ = Balances::deposit_creating(&1, 100); System::set_block_number(2); - let _ = Balances::reserve(&1, 10); + assert_ok!(Balances::reserve(&1, 10)); assert_eq!( last_event(), @@ -692,7 +692,7 @@ macro_rules! decl_tests { ); System::set_block_number(3); - let _ = Balances::unreserve(&1, 5); + assert!(Balances::unreserve(&1, 5).is_zero()); assert_eq!( last_event(), @@ -700,7 +700,7 @@ macro_rules! 
decl_tests { ); System::set_block_number(4); - let _ = Balances::unreserve(&1, 6); + assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 assert_eq!( diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 14dfd0c4b33d6..90bcaf1a480ad 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -30,7 +30,7 @@ use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; use pallet_transaction_payment::CurrencyAdapter; use crate::{ self as pallet_balances, - Module, Config, decl_tests, + Pallet, Config, decl_tests, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -41,8 +41,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, } ); @@ -80,7 +80,7 @@ parameter_types! 
{ pub const TransactionByteFee: u64 = 1; } impl pallet_transaction_payment::Config for Test { - type OnChargeTransaction = CurrencyAdapter, ()>; + type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 02088e88b98ec..10ea74d8887bc 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -30,7 +30,7 @@ use frame_support::traits::StorageMapShim; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; use crate::{ self as pallet_balances, - Module, Config, decl_tests, + Pallet, Config, decl_tests, }; use pallet_transaction_payment::CurrencyAdapter; @@ -43,8 +43,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, } ); @@ -82,7 +82,7 @@ parameter_types! 
{ pub const TransactionByteFee: u64 = 1; } impl pallet_transaction_payment::Config for Test { - type OnChargeTransaction = CurrencyAdapter, ()>; + type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 020c514b6317c..547c7dd7cfb72 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -30,7 +30,7 @@ use frame_support::traits::StorageMapShim; use frame_support::weights::{IdentityFee}; use crate::{ self as pallet_balances, - Module, Config, + Pallet, Config, }; use pallet_transaction_payment::CurrencyAdapter; @@ -47,7 +47,7 @@ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; fn last_event() -> Event { - system::Module::::events().pop().expect("Event expected").event + system::Pallet::::events().pop().expect("Event expected").event } frame_support::construct_runtime!( @@ -56,8 +56,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, } ); @@ -95,7 +95,7 @@ parameter_types! 
{ pub const TransactionByteFee: u64 = 1; } impl pallet_transaction_payment::Config for Test { - type OnChargeTransaction = CurrencyAdapter, ()>; + type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index c2f60a5e13c41..b134e79ca2450 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -676,7 +676,7 @@ macro_rules! impl_benchmark { ( $( $name_extra:ident ),* ) ) => { impl, $instance: $instance_bound )? > - $crate::Benchmarking<$crate::BenchmarkResults> for Module + $crate::Benchmarking<$crate::BenchmarkResults> for Pallet where T: frame_system::Config, $( $where_clause )* { fn benchmarks(extra: bool) -> $crate::Vec<&'static [u8]> { @@ -744,8 +744,8 @@ macro_rules! impl_benchmark { >::instance(&selected_benchmark, c, verify)?; // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1u32.into()); + if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { + frame_system::Pallet::::set_block_number(1u32.into()); } // Commit the externalities to the database, flushing the DB cache. @@ -915,8 +915,8 @@ macro_rules! impl_benchmark_test { >::instance(&selected_benchmark, &c, true)?; // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1u32.into()); + if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { + frame_system::Pallet::::set_block_number(1u32.into()); } // Run execution + verification @@ -961,7 +961,7 @@ macro_rules! 
impl_benchmark_test { /// When called in `pallet_example` as /// /// ```rust,ignore -/// impl_benchmark_test_suite!(Module, crate::tests::new_test_ext(), crate::tests::Test); +/// impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); /// ``` /// /// It expands to the equivalent of: @@ -1019,11 +1019,11 @@ macro_rules! impl_benchmark_test { /// } /// /// mod tests { -/// // because of macro syntax limitations, neither Module nor benches can be paths, but both have +/// // because of macro syntax limitations, neither Pallet nor benches can be paths, but both have /// // to be idents in the scope of `impl_benchmark_test_suite`. -/// use crate::{benches, Module}; +/// use crate::{benches, Pallet}; /// -/// impl_benchmark_test_suite!(Module, new_test_ext(), Test, benchmarks_path = benches); +/// impl_benchmark_test_suite!(Pallet, new_test_ext(), Test, benchmarks_path = benches); /// /// // new_test_ext and the Test item are defined later in this module /// } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 8431f3e46c277..ac0a208543058 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -76,8 +76,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - TestPallet: pallet_test::{Module, Call, Storage}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + TestPallet: pallet_test::{Pallet, Call, Storage}, } ); @@ -133,7 +133,7 @@ mod benchmarks { use crate::{BenchmarkingSetup, BenchmarkParameter, account}; // Additional used internally by the benchmark macro. 
- use super::pallet_test::{Call, Config, Module}; + use super::pallet_test::{Call, Config, Pallet}; crate::benchmarks!{ where_clause { diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 632f951f05e19..cb7933079763a 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -84,7 +84,7 @@ fn setup_pot_account() { } fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); + let events = frame_system::Pallet::::events(); let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; @@ -122,7 +122,7 @@ benchmarks! { let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::get() - 1; - frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_id) @@ -159,7 +159,7 @@ benchmarks! { let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); + frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get()); ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); }: _(RawOrigin::Signed(curator), bounty_id) diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index ba0d4a5b16cb4..dafa7cd61d054 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -424,7 +424,7 @@ decl_module! { // If the sender is not the curator, and the curator is inactive, // slash the curator. 
if sender != *curator { - let block_number = system::Module::::block_number(); + let block_number = system::Pallet::::block_number(); if *update_due < block_number { slash_curator(curator, &mut bounty.curator_deposit); // Continue to change bounty status below... @@ -435,7 +435,8 @@ decl_module! { } else { // Else this is the curator, willingly giving up their role. // Give back their deposit. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); // Continue to change bounty status below... } }, @@ -479,7 +480,7 @@ decl_module! { T::Currency::reserve(curator, deposit)?; bounty.curator_deposit = deposit; - let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); + let update_due = system::Pallet::::block_number() + T::BountyUpdatePeriod::get(); bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; Ok(()) @@ -518,7 +519,7 @@ decl_module! { bounty.status = BountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), + unlock_at: system::Pallet::::block_number() + T::BountyDepositPayoutDelay::get(), }; Ok(()) @@ -543,14 +544,18 @@ decl_module! 
{ Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); + ensure!(system::Pallet::::block_number() >= unlock_at, Error::::Premature); let bounty_account = Self::bounty_account_id(bounty_id); let balance = T::Currency::free_balance(&bounty_account); let fee = bounty.fee.min(balance); // just to be safe let payout = balance.saturating_sub(fee); - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail - let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); + let res = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail + debug_assert!(res.is_ok()); + let res = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + debug_assert!(res.is_ok()); + *maybe_bounty = None; BountyDescriptions::remove(bounty_id); @@ -604,7 +609,8 @@ decl_module! { }, BountyStatus::Active { curator, .. } => { // Cancelled by council, refund deposit of the working curator. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); // Then execute removal of the bounty below. }, BountyStatus::PendingPayout { .. } => { @@ -621,7 +627,8 @@ decl_module! 
{ BountyDescriptions::remove(bounty_id); let balance = T::Currency::free_balance(&bounty_account); - let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + let res = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + debug_assert!(res.is_ok()); *maybe_bounty = None; Self::deposit_event(Event::::BountyCanceled(bounty_id)); @@ -649,7 +656,7 @@ decl_module! { match bounty.status { BountyStatus::Active { ref curator, ref mut update_due } => { ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + *update_due = (system::Pallet::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); }, _ => return Err(Error::::UnexpectedStatus.into()), } @@ -736,7 +743,8 @@ impl pallet_treasury::SpendFunds for Module { bounty.status = BountyStatus::Funded; // return their deposit. - let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); + let err_amount = T::Currency::unreserve(&bounty.proposer, bounty.bond); + debug_assert!(err_amount.is_zero()); // fund the bounty account imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index cbff502daa65e..617f186975269 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -43,10 +43,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Bounties: pallet_bounties::{Module, Call, Storage, Event}, - Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Bounties: 
pallet_bounties::{Pallet, Call, Storage, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, } ); @@ -107,7 +107,7 @@ parameter_types! { // impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { type ModuleId = TreasuryModuleId; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type Event = Event; diff --git a/frame/collective/README.md b/frame/collective/README.md index f62df65f728cd..444927e51da22 100644 --- a/frame/collective/README.md +++ b/frame/collective/README.md @@ -7,19 +7,19 @@ The pallet assumes that the amount of members stays at or below `MaxMembers` for calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. A "prime" member may be set to help determine the default vote behavior based on chain -config. If `PreimDefaultVote` is used, the prime vote acts as the default vote in case of any +config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then abstentations will first follow the majority of the collective voting, and then the prime member. -Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a +Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a number of approvals required for it to pass and be called. Motions are open for members to -vote on for a minimum period given by `MotionDuration`. As soon as the needed number of +vote on for a minimum period given by `MotionDuration`. As soon as the required number of approvals is given, the motion is closed and executed. If the number of approvals is not reached during the voting period, then `close` may be called by any account in order to force the end -the motion explicitly. 
If a prime member is defined then their vote is used in place of any +the motion explicitly. If a prime member is defined, then their vote is used instead of any abstentions and the proposal is executed if there are enough approvals counting the new votes. -If there are not, or if no prime is set, then the motion is dropped without being executed. +If there are not, or if no prime member is set, then the motion is dropped without being executed. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index 1afdd14b1ad38..cd4fcfba5fe1e 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -31,7 +31,7 @@ use sp_runtime::traits::Bounded; use sp_std::mem::size_of; use frame_system::Call as SystemCall; -use frame_system::Module as System; +use frame_system::Pallet as System; use crate::Module as Collective; const SEED: u32 = 0; diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 6d9066bca241c..28c2ff77b81fe 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -472,7 +472,7 @@ decl_module! { let index = Self::proposal_count(); >::mutate(|i| *i += 1); >::insert(proposal_hash, *proposal); - let end = system::Module::::block_number() + T::MotionDuration::get(); + let end = system::Pallet::::block_number() + T::MotionDuration::get(); let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![], end }; >::insert(proposal_hash, votes); @@ -647,7 +647,7 @@ decl_module! { } // Only allow actual closing of the proposal after the voting period has ended. 
- ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); + ensure!(system::Pallet::::block_number() >= voting.end, Error::::TooEarly); let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); @@ -1045,10 +1045,10 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Collective: collective::::{Module, Call, Event, Origin, Config}, - CollectiveMajority: collective::::{Module, Call, Event, Origin, Config}, - DefaultCollective: collective::{Module, Call, Event, Origin, Config}, + System: system::{Pallet, Call, Event}, + Collective: collective::::{Pallet, Call, Event, Origin, Config}, + CollectiveMajority: collective::::{Pallet, Call, Event, Origin, Config}, + DefaultCollective: collective::{Pallet, Call, Event, Origin, Config}, } ); diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md index ef69e050a2c5f..efc3eb93c5701 100644 --- a/frame/contracts/CHANGELOG.md +++ b/frame/contracts/CHANGELOG.md @@ -20,7 +20,14 @@ In other words: Upgrading this pallet will not break pre-existing contracts. ### Added +- Make storage and fields of `Schedule` private to the crate. +[1](https://github.com/paritytech/substrate/pull/8359) + +- Add new version of `seal_random` which exposes additional information. +[1](https://github.com/paritytech/substrate/pull/8329) + - Add `seal_rent_params` contract callable function. +[1](https://github.com/paritytech/substrate/pull/8231) ## [v3.0.0] 2021-02-25 diff --git a/frame/contracts/COMPLEXITY.md b/frame/contracts/COMPLEXITY.md index 32f6f84b89b6a..f0e5a035586bc 100644 --- a/frame/contracts/COMPLEXITY.md +++ b/frame/contracts/COMPLEXITY.md @@ -176,7 +176,7 @@ Before a call or instantiate can be performed the execution context must be init For the first call or instantiation in the handling of an extrinsic, this involves two calls: 1. `>::now()` -2. `>::block_number()` +2. 
`>::block_number()` The complexity of initialization depends on the complexity of these functions. In the current implementation they just involve a DB read. diff --git a/frame/contracts/README.md b/frame/contracts/README.md index 1cb384e14c5a4..6c987165990b8 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -57,4 +57,19 @@ will make things a lot easier. One such language is [`ink`](https://github.com/p which is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing WebAssembly based smart contracts in the Rust programming language. +## Debugging + +Contracts can emit messages to the node console when run on a development chain through the +`seal_println` API. This is exposed in ink! via +[`ink_env::debug_println()`](https://docs.rs/ink_env/latest/ink_env/fn.debug_println.html). + +In order to see these messages the log level for the `runtime::contracts` target needs to be raised +to at least the `info` level which is the default. However, those messages are easy to overlook +because of the noise generated by block production. A good starting point for contract debugging +could be: + +```bash +cargo run --release -- --dev --tmp -lerror,runtime::contracts +``` + License: Apache-2.0 diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 64d2a0cf011d9..118ce038fc229 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -24,9 +24,7 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. 
-use crate::Config; -use crate::Module as Contracts; - +use crate::{Config, CurrentSchedule}; use parity_wasm::elements::{ Instruction, Instructions, FuncBody, ValueType, BlockType, Section, CustomSection, }; @@ -225,7 +223,7 @@ where if def.inject_stack_metering { code = inject_limiter( code, - Contracts::::current_schedule().limits.stack_height + >::get().limits.stack_height ) .unwrap(); } @@ -505,5 +503,5 @@ where T: Config, T::AccountId: UncheckedFrom + AsRef<[u8]>, { - Contracts::::current_schedule().limits.memory_pages + >::get().limits.memory_pages } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index d41154e995a67..3db04d3caf3dd 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -23,7 +23,7 @@ mod code; mod sandbox; use crate::{ - *, Module as Contracts, + *, Pallet as Contracts, exec::StorageKey, rent::Rent, schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, @@ -36,8 +36,9 @@ use self::{ }, sandbox::Sandbox, }; +use codec::Encode; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; -use frame_system::{Module as System, RawOrigin}; +use frame_system::{Pallet as System, RawOrigin}; use parity_wasm::elements::{Instruction, ValueType, BlockType}; use sp_runtime::traits::{Hash, Bounded, Zero}; use sp_std::{default::Default, convert::{TryInto}, vec::Vec, vec}; @@ -313,7 +314,7 @@ benchmarks! { let WasmModule { code, hash, .. } = WasmModule::::sized(c * 1024); Contracts::::store_code_raw(code)?; let mut module = PrefabWasmModule::from_storage_noinstr(hash)?; - let schedule = Contracts::::current_schedule(); + let schedule = >::get(); }: { Contracts::::reinstrument_module(&mut module, &schedule)?; } @@ -936,7 +937,7 @@ benchmarks! { seal_random { let r in 0 .. 
API_BENCHMARK_BATCHES; let pages = code::max_pages::(); - let subject_len = Contracts::::current_schedule().limits.subject_len; + let subject_len = >::get().limits.subject_len; assert!(subject_len < 1024); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -992,7 +993,7 @@ benchmarks! { // `t`: Number of topics // `n`: Size of event payload in kb seal_deposit_event_per_topic_and_kb { - let t in 0 .. Contracts::::current_schedule().limits.event_topics; + let t in 0 .. >::get().limits.event_topics; let n in 0 .. T::MaxValueSize::get() / 1024; let mut topics = (0..API_BENCHMARK_BATCH_SIZE) .map(|n| (n * t..n * t + t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode()) @@ -1922,7 +1923,7 @@ benchmarks! { // w_br_table_per_entry = w_bench instr_br_table_per_entry { - let e in 1 .. Contracts::::current_schedule().limits.br_table_size; + let e in 1 .. >::get().limits.br_table_size; let entry: Vec = [0, 1].iter() .cloned() .cycle() @@ -1978,7 +1979,7 @@ benchmarks! { // w_call_indrect = w_bench - 3 * w_param instr_call_indirect { let r in 0 .. INSTR_BENCHMARK_BATCHES; - let num_elements = Contracts::::current_schedule().limits.table_size; + let num_elements = >::get().limits.table_size; use self::code::TableSegment; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { // We need to make use of the stack here in order to trigger stack height @@ -2008,8 +2009,8 @@ benchmarks! { // linearly depend on the amount of parameters to this function. // Please note that this is not necessary with a direct call. instr_call_indirect_per_param { - let p in 0 .. Contracts::::current_schedule().limits.parameters; - let num_elements = Contracts::::current_schedule().limits.table_size; + let p in 0 .. 
>::get().limits.parameters; + let num_elements = >::get().limits.table_size; use self::code::TableSegment; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { // We need to make use of the stack here in order to trigger stack height @@ -2039,7 +2040,7 @@ benchmarks! { // w_local_get = w_bench - 1 * w_param instr_local_get { let r in 0 .. INSTR_BENCHMARK_BATCHES; - let max_locals = Contracts::::current_schedule().limits.stack_height; + let max_locals = >::get().limits.stack_height; let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ RandomGetLocal(0, max_locals), Regular(Instruction::Drop), @@ -2056,7 +2057,7 @@ benchmarks! { // w_local_set = w_bench - 1 * w_param instr_local_set { let r in 0 .. INSTR_BENCHMARK_BATCHES; - let max_locals = Contracts::::current_schedule().limits.stack_height; + let max_locals = >::get().limits.stack_height; let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ RandomI64Repeated(1), RandomSetLocal(0, max_locals), @@ -2073,7 +2074,7 @@ benchmarks! { // w_local_tee = w_bench - 2 * w_param instr_local_tee { let r in 0 .. INSTR_BENCHMARK_BATCHES; - let max_locals = Contracts::::current_schedule().limits.stack_height; + let max_locals = >::get().limits.stack_height; let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ RandomI64Repeated(1), RandomTeeLocal(0, max_locals), @@ -2091,7 +2092,7 @@ benchmarks! { // w_global_get = w_bench - 1 * w_param instr_global_get { let r in 0 .. INSTR_BENCHMARK_BATCHES; - let max_globals = Contracts::::current_schedule().limits.globals; + let max_globals = >::get().limits.globals; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ RandomGetGlobal(0, max_globals), @@ -2107,7 +2108,7 @@ benchmarks! { // w_global_set = w_bench - 1 * w_param instr_global_set { let r in 0 .. 
INSTR_BENCHMARK_BATCHES; - let max_globals = Contracts::::current_schedule().limits.globals; + let max_globals = >::get().limits.globals; let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ RandomI64Repeated(1), diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index dc6e9771775ca..4ac5300d57d7f 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -47,6 +47,12 @@ //! induces. In order to be able to charge the correct weight for the functions defined //! by a chain extension benchmarks must be written, too. In the near future this crate //! will provide the means for easier creation of those specialized benchmarks. +//! +//! # Example +//! +//! The ink! repository maintains an +//! [end-to-end example](https://github.com/paritytech/ink/tree/master/examples/rand-extension) +//! on how to use a chain extension in order to provide new features to ink! contracts. use crate::{ Error, @@ -141,8 +147,8 @@ pub enum RetVal { /// Grants the chain extension access to its parameters and execution environment. /// -/// It uses the typestate pattern to enforce the correct usage of the parameters passed -/// to the chain extension. +/// It uses [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html) +/// to enforce the correct usage of the parameters passed to the chain extension. pub struct Environment<'a, 'b, E: Ext, S: state::State> { /// The actual data of this type. inner: Inner<'a, 'b, E>, @@ -376,6 +382,8 @@ mod state { pub trait BufIn: State {} pub trait BufOut: State {} + /// The initial state of an [`Environment`](`super::Environment`). + /// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). 
pub enum Init {} pub enum OnlyIn {} pub enum PrimInBufOut {} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 427cf1ada5ad5..27f70dea8c598 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -16,7 +16,7 @@ // limitations under the License. use crate::{ - CodeHash, Event, Config, Module as Contracts, + CodeHash, Event, Config, Pallet as Contracts, TrieId, BalanceOf, ContractInfo, gas::GasMeter, rent::Rent, storage::{self, Storage}, Error, ContractInfoOf, Schedule, AliveContractInfo, }; @@ -243,7 +243,7 @@ pub trait Ext: sealing::Sealed { fn tombstone_deposit(&self) -> BalanceOf; /// Returns a random number for the current block with the given subject. - fn random(&self, subject: &[u8]) -> SeedOf; + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf); /// Deposit an event with the given topics. /// @@ -384,7 +384,7 @@ where depth: 0, schedule, timestamp: T::Time::now(), - block_number: >::block_number(), + block_number: >::block_number(), _phantom: Default::default(), } } @@ -845,10 +845,8 @@ where self.value_transferred } - fn random(&self, subject: &[u8]) -> SeedOf { - // TODO: change API to expose randomness freshness - // https://github.com/paritytech/substrate/issues/8297 - T::Randomness::random(subject).0 + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { + T::Randomness::random(subject) } fn now(&self) -> &MomentOf { @@ -909,7 +907,7 @@ fn deposit_event( topics: Vec, event: Event, ) { - >::deposit_event_indexed( + >::deposit_event_indexed( &*topics, ::Event::from(event).into(), ) @@ -945,7 +943,7 @@ mod tests { test_utils::{place_contract, set_balance, get_balance}, }, exec::ExportedFunction::*, - Error, Weight, + Error, Weight, CurrentSchedule, }; use sp_runtime::DispatchError; use assert_matches::assert_matches; @@ -961,7 +959,7 @@ mod tests { } fn events() -> Vec> { - >::events() + >::events() .into_iter() .filter_map(|meta| match meta.event { 
MetaEvent::pallet_contracts(contract_event) => Some(contract_event), @@ -1141,7 +1139,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, exec_ch); @@ -1191,7 +1189,7 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(origin.clone(), &schedule); place_contract(&BOB, return_ch); set_balance(&origin, 100); @@ -1251,7 +1249,7 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(origin, &schedule); place_contract(&BOB, return_ch); @@ -1280,7 +1278,7 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(origin, &schedule); place_contract(&BOB, return_ch); @@ -1306,7 +1304,7 @@ mod tests { // This one tests passing the input data into a contract via call. ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, input_data_ch); @@ -1329,7 +1327,7 @@ mod tests { // This one tests passing the input data into a contract via instantiate. 
ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let subsistence = Contracts::::subsistence_threshold(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1382,7 +1380,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&BOB, 1); place_contract(&BOB, recurse_ch); @@ -1430,7 +1428,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(origin.clone(), &schedule); place_contract(&dest, bob_ch); place_contract(&CHARLIE, charlie_ch); @@ -1468,7 +1466,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); @@ -1489,7 +1487,7 @@ mod tests { let dummy_ch = MockLoader::insert(Constructor, |_, _| exec_success()); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage( @@ -1517,7 +1515,7 @@ mod tests { ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage( @@ -1553,7 +1551,7 @@ mod tests { ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = 
Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage( @@ -1601,7 +1599,7 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, Contracts::::subsistence_threshold() * 100); place_contract(&BOB, instantiator_ch); @@ -1650,7 +1648,7 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, 1000); set_balance(&BOB, 100); @@ -1678,7 +1676,7 @@ mod tests { .existential_deposit(15) .build() .execute_with(|| { - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage( @@ -1717,7 +1715,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let subsistence = Contracts::::subsistence_threshold(); - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage( @@ -1749,7 +1747,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let subsistence = Contracts::::subsistence_threshold(); - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); set_balance(&ALICE, subsistence * 10); @@ -1797,7 +1795,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let subsistence = 
Contracts::::subsistence_threshold(); - let schedule = Contracts::current_schedule(); + let schedule = >::get(); let mut ctx = MockContext::top_level(ALICE, &schedule); let mut gas_meter = GasMeter::::new(GAS_LIMIT); set_balance(&ALICE, subsistence * 100); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 5453e079e3ae4..46947ea9e1aef 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -15,21 +15,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Contract Module +//! # Contract Pallet //! //! The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. //! -//! - [`contract::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! This module extends accounts based on the `Currency` trait to have smart-contract functionality. It can -//! be used with other modules that implement accounts based on `Currency`. These "smart-contract accounts" +//! This module extends accounts based on the [`Currency`] trait to have smart-contract functionality. It can +//! be used with other modules that implement accounts based on [`Currency`]. These "smart-contract accounts" //! have the ability to instantiate smart-contracts and make calls to other contract and non-contract accounts. //! -//! The smart-contract code is stored once in a `code_cache`, and later retrievable via its `code_hash`. -//! This means that multiple smart-contracts can be instantiated from the same `code_cache`, without replicating +//! The smart-contract code is stored once in a code cache, and later retrievable via its hash. +//! This means that multiple smart-contracts can be instantiated from the same hash, without replicating //! the code each time. //! //! When a smart-contract is called, its associated code is retrieved via the code hash and gets executed. 
@@ -59,12 +59,17 @@ //! //! ### Dispatchable functions //! -//! * `instantiate_with_code` - Deploys a new contract from the supplied wasm binary, optionally transferring -//! some balance. This instantiates a new smart contract account and calls its contract deploy -//! handler to initialize the contract. -//! * `instantiate` - The same as `instantiate_with_code` but instead of uploading new code an -//! existing `code_hash` is supplied. -//! * `call` - Makes a call to an account, optionally transferring some balance. +//! * [`Pallet::update_schedule`] - +//! ([Root Origin](https://substrate.dev/docs/en/knowledgebase/runtime/origin) Only) - +//! Set a new [`Schedule`]. +//! * [`Pallet::instantiate_with_code`] - Deploys a new contract from the supplied wasm binary, +//! optionally transferring +//! some balance. This instantiates a new smart contract account with the supplied code and +//! calls its constructor to initialize the contract. +//! * [`Pallet::instantiate`] - The same as `instantiate_with_code` but instead of uploading new +//! code an existing `code_hash` is supplied. +//! * [`Pallet::call`] - Makes a call to an account, optionally transferring some balance. +//! * [`Pallet::claim_surcharge`] - Evict a contract that cannot pay rent anymore. //! //! ## Usage //! 
@@ -98,47 +103,38 @@ pub mod weights; #[cfg(test)] mod tests; -pub use crate::{ - wasm::PrefabWasmModule, - schedule::{Schedule, HostFnWeights, InstructionWeights, Limits}, - pallet::*, -}; +pub use crate::{pallet::*, schedule::Schedule}; use crate::{ gas::GasMeter, exec::{ExecutionContext, Executable}, rent::Rent, - storage::{Storage, DeletedContract}, + storage::{Storage, DeletedContract, ContractInfo, AliveContractInfo, TombstoneContractInfo}, weights::WeightInfo, + wasm::PrefabWasmModule, }; use sp_core::crypto::UncheckedFrom; -use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; -use codec::{Codec, Encode, Decode}; +use sp_std::prelude::*; use sp_runtime::{ traits::{ - Hash, StaticLookup, MaybeSerializeDeserialize, Member, Convert, Saturating, Zero, + Hash, StaticLookup, Convert, Saturating, Zero, }, - RuntimeDebug, Perbill, + Perbill, }; use frame_support::{ - storage::child::ChildInfo, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, weights::{Weight, PostDispatchInfo, WithPostDispatchInfo}, }; -use frame_system::Module as System; +use frame_system::Pallet as System; use pallet_contracts_primitives::{ RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, }; -pub type CodeHash = ::Hash; -pub type TrieId = Vec; -pub type BalanceOf = +type CodeHash = ::Hash; +type TrieId = Vec; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; -pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; #[frame_support::pallet] pub mod pallet { @@ -221,7 +217,7 @@ pub mod pallet { #[pallet::constant] type MaxValueSize: Get; - /// Used to answer contracts's queries regarding the current weight price. This is **not** + /// Used to answer contracts' queries regarding the current weight price. 
This is **not** /// used to calculate the actual fee and is only for informational purposes. type WeightPrice: Convert>; @@ -248,7 +244,6 @@ pub mod pallet { } #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(PhantomData); #[pallet::hooks] @@ -290,7 +285,7 @@ pub mod pallet { schedule: Schedule ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - if >::current_schedule().version > schedule.version { + if >::get().version > schedule.version { Err(Error::::InvalidScheduleVersion)? } Self::deposit_event(Event::ScheduleUpdated(schedule.version)); @@ -316,7 +311,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut gas_meter = GasMeter::new(gas_limit); - let schedule = >::current_schedule(); + let schedule = >::get(); let mut ctx = ExecutionContext::>::top_level(origin, &schedule); let (result, code_len) = match ctx.call(dest, value, &mut gas_meter, data) { Ok((output, len)) => (Ok(output), len), @@ -336,7 +331,7 @@ pub mod pallet { /// * `gas_limit`: The gas limit enforced when executing the constructor. /// * `code`: The contract code to deploy in raw bytes. /// * `data`: The input data to pass to the contract constructor. - /// * `salt`: Used for the address derivation. See [`Self::contract_address`]. + /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. 
/// /// Instantiation is executed as follows: /// @@ -365,7 +360,7 @@ pub mod pallet { let code_len = code.len() as u32; ensure!(code_len <= T::MaxCodeSize::get(), Error::::CodeTooLarge); let mut gas_meter = GasMeter::new(gas_limit); - let schedule = >::current_schedule(); + let schedule = >::get(); let executable = PrefabWasmModule::from_code(code, &schedule)?; let code_len = executable.code_len(); ensure!(code_len <= T::MaxCodeSize::get(), Error::::CodeTooLarge); @@ -397,7 +392,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let mut gas_meter = GasMeter::new(gas_limit); - let schedule = >::current_schedule(); + let schedule = >::get(); let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; let mut ctx = ExecutionContext::>::top_level(origin, &schedule); let code_len = executable.code_len(); @@ -614,33 +609,33 @@ pub mod pallet { /// Current cost schedule for contracts. #[pallet::storage] - #[pallet::getter(fn current_schedule)] - pub(super) type CurrentSchedule = StorageValue<_, Schedule, ValueQuery>; + pub(crate) type CurrentSchedule = StorageValue<_, Schedule, ValueQuery>; /// A mapping from an original code hash to the original code, untouched by instrumentation. #[pallet::storage] - pub type PristineCode = StorageMap<_, Identity, CodeHash, Vec>; + pub(crate) type PristineCode = StorageMap<_, Identity, CodeHash, Vec>; /// A mapping between an original code hash and instrumented wasm code, ready for execution. #[pallet::storage] - pub type CodeStorage = StorageMap<_, Identity, CodeHash, PrefabWasmModule>; + pub(crate) type CodeStorage = StorageMap<_, Identity, CodeHash, PrefabWasmModule>; /// The subtrie counter. #[pallet::storage] - pub type AccountCounter = StorageValue<_, u64, ValueQuery>; + pub(crate) type AccountCounter = StorageValue<_, u64, ValueQuery>; /// The code associated with a given account. /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. 
#[pallet::storage] - pub type ContractInfoOf = StorageMap<_, Twox64Concat, T::AccountId, ContractInfo>; + pub(crate) type ContractInfoOf = StorageMap<_, Twox64Concat, T::AccountId, ContractInfo>; /// Evicted contracts that await child trie deletion. /// /// Child trie deletion is a heavy operation depending on the amount of storage items /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`. #[pallet::storage] - pub type DeletionQueue = StorageValue<_, Vec, ValueQuery>; + pub(crate) type DeletionQueue = StorageValue<_, Vec, ValueQuery>; + #[pallet::genesis_config] pub struct GenesisConfig { @@ -665,7 +660,7 @@ pub mod pallet { } } -impl Module +impl Pallet where T::AccountId: UncheckedFrom + AsRef<[u8]>, { @@ -683,7 +678,7 @@ where input_data: Vec, ) -> ContractExecResult { let mut gas_meter = GasMeter::new(gas_limit); - let schedule = >::current_schedule(); + let schedule = >::get(); let mut ctx = ExecutionContext::>::top_level(origin, &schedule); let result = ctx.call(dest, value, &mut gas_meter, input_data); let gas_consumed = gas_meter.gas_spent(); @@ -743,10 +738,24 @@ where T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get()) } + /// The in-memory size in bytes of the data structure associated with each contract. + /// + /// The data structure is also put into storage for each contract. The in-storage size + /// is never larger than the in-memory representation and usually smaller due to compact + /// encoding and lack of padding. + /// + /// # Note + /// + /// This returns the in-memory size because the in-storage size (SCALE encoded) cannot + /// be efficiently determined. Treat this as an upper bound of the in-storage size. + pub fn contract_info_size() -> u32 { + sp_std::mem::size_of::>() as u32 + } + /// Store code for benchmarks which does not check nor instrument the code. 
#[cfg(feature = "runtime-benchmarks")] fn store_code_raw(code: Vec) -> frame_support::dispatch::DispatchResult { - let schedule = >::current_schedule(); + let schedule = >::get(); PrefabWasmModule::store_code_unchecked(code, &schedule)?; Ok(()) } @@ -760,127 +769,3 @@ where self::wasm::reinstrument(module, schedule) } } - -/// Information for managing an account and its sub trie abstraction. -/// This is the required info to cache for an account -#[derive(Encode, Decode, RuntimeDebug)] -pub enum ContractInfo { - Alive(AliveContractInfo), - Tombstone(TombstoneContractInfo), -} - -impl ContractInfo { - /// If contract is alive then return some alive info - pub fn get_alive(self) -> Option> { - if let ContractInfo::Alive(alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some reference to alive info - pub fn as_alive(&self) -> Option<&AliveContractInfo> { - if let ContractInfo::Alive(ref alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some mutable reference to alive info - pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { - if let ContractInfo::Alive(ref mut alive) = self { - Some(alive) - } else { - None - } - } - - /// If contract is tombstone then return some tombstone info - pub fn get_tombstone(self) -> Option> { - if let ContractInfo::Tombstone(tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some reference to tombstone info - pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some mutable reference to tombstone info - pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref mut tombstone) = self { - Some(tombstone) - } else { - None - } - } -} - -/// Information for managing an account and 
its sub trie abstraction. -/// This is the required info to cache for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct RawAliveContractInfo { - /// Unique ID for the subtree encoded as a bytes vector. - pub trie_id: TrieId, - /// The total number of bytes used by this contract. - /// - /// It is a sum of each key-value pair stored by this contract. - pub storage_size: u32, - /// The total number of key-value pairs in storage of this contract. - pub pair_count: u32, - /// The code associated with a given account. - pub code_hash: CodeHash, - /// Pay rent at most up to this value. - pub rent_allowance: Balance, - /// The amount of rent that was payed by the contract over its whole lifetime. - /// - /// A restored contract starts with a value of zero just like a new contract. - pub rent_payed: Balance, - /// Last block rent has been payed. - pub deduct_block: BlockNumber, - /// Last block child storage has been written. - pub last_write: Option, - /// This field is reserved for future evolution of format. - pub _reserved: Option<()>, -} - -impl RawAliveContractInfo { - /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_info(&self) -> ChildInfo { - child_trie_info(&self.trie_id[..]) - } -} - -/// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { - ChildInfo::new_default(trie_id) -} - -#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub struct RawTombstoneContractInfo(H, PhantomData); - -impl RawTombstoneContractInfo -where - H: Member + MaybeSerializeDeserialize+ Debug - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default - + sp_std::hash::Hash + Codec, - Hasher: Hash, -{ - fn new(storage_root: &[u8], code_hash: H) -> Self { - let mut buf = Vec::new(); - storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); - buf.extend_from_slice(code_hash.as_ref()); - RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) - } -} - -impl From> for ContractInfo { - fn from(alive_info: AliveContractInfo) -> Self { - Self::Alive(alive_info) - } -} diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index e9befeee2d370..8605451ad1ee7 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -18,7 +18,7 @@ //! A module responsible for computing the right amount of weight and charging it. use crate::{ - AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, Event, + AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Pallet, Event, TombstoneContractInfo, Config, CodeHash, Error, storage::Storage, wasm::PrefabWasmModule, exec::Executable, }; @@ -124,7 +124,7 @@ where free_balance: &BalanceOf, contract: &AliveContractInfo, ) -> Option> { - let subsistence_threshold = Module::::subsistence_threshold(); + let subsistence_threshold = Pallet::::subsistence_threshold(); // Reserved balance contributes towards the subsistence threshold to stay consistent // with the existential deposit where the reserved balance is also counted. 
if *total_balance < subsistence_threshold { @@ -245,7 +245,6 @@ where evictable_code: Option>, ) -> Result>, DispatchError> { match (verdict, evictable_code) { - (Verdict::Exempt, _) => return Ok(Some(alive_contract_info)), (Verdict::Evict { amount }, Some(code)) => { // We need to remove the trie first because it is the only operation // that can fail and this function is called without a storage @@ -268,12 +267,20 @@ where let tombstone_info = ContractInfo::Tombstone(tombstone); >::insert(account, &tombstone_info); code.drop_from_storage(); - >::deposit_event(Event::Evicted(account.clone())); + >::deposit_event(Event::Evicted(account.clone())); Ok(None) } (Verdict::Evict { amount: _ }, None) => { Ok(None) } + (Verdict::Exempt, _) => { + let contract = ContractInfo::Alive(AliveContractInfo:: { + deduct_block: current_block_number, + ..alive_contract_info + }); + >::insert(account, &contract); + Ok(Some(contract.get_alive().expect("We just constructed it as alive. qed"))) + }, (Verdict::Charge { amount }, _) => { let contract = ContractInfo::Alive(AliveContractInfo:: { rent_allowance: alive_contract_info.rent_allowance - amount.peek(), @@ -298,7 +305,7 @@ where contract: AliveContractInfo, code_size: u32, ) -> Result>, DispatchError> { - let current_block_number = >::block_number(); + let current_block_number = >::block_number(); let verdict = Self::consider_case( account, current_block_number, @@ -333,7 +340,7 @@ where }; let module = PrefabWasmModule::::from_storage_noinstr(contract.code_hash)?; let code_len = module.code_len(); - let current_block_number = >::block_number(); + let current_block_number = >::block_number(); let verdict = Self::consider_case( account, current_block_number, @@ -384,7 +391,7 @@ where let module = PrefabWasmModule::from_storage_noinstr(alive_contract_info.code_hash) .map_err(|_| IsTombstone)?; let code_size = module.occupied_storage(); - let current_block_number = >::block_number(); + let current_block_number = >::block_number(); let 
verdict = Self::consider_case( account, current_block_number, @@ -465,7 +472,7 @@ where let child_trie_info = origin_contract.child_trie_info(); - let current_block = >::block_number(); + let current_block = >::block_number(); if origin_contract.last_write == Some(current_block) { return Err((Error::::InvalidContractOrigin.into(), 0, 0)); diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 24ba83cc1b799..90c396c627775 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -39,7 +39,11 @@ pub const API_BENCHMARK_BATCH_SIZE: u32 = 100; /// as for `API_BENCHMARK_BATCH_SIZE`. pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 1_000; -/// Definition of the cost schedule and other parameterizations for wasm vm. +/// Definition of the cost schedule and other parameterizations for the wasm vm. +/// +/// Its fields are private to the crate in order to allow addition of new contract +/// callable functions without bumping to a new major version. A genesis config should +/// rely on public functions of this type. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))] #[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug)] @@ -53,20 +57,20 @@ pub struct Schedule { /// of all contracts which are triggered by a version comparison on call. /// Changes to other parts of the schedule should not increment the version in /// order to avoid unnecessary re-instrumentations. - pub version: u32, + pub(crate) version: u32, /// Whether the `seal_println` function is allowed to be used contracts. /// MUST only be enabled for `dev` chains, NOT for production chains - pub enable_println: bool, + pub(crate) enable_println: bool, /// Describes the upper limits on various metrics. - pub limits: Limits, + pub(crate) limits: Limits, /// The weights for individual wasm instructions. 
- pub instruction_weights: InstructionWeights, + pub(crate) instruction_weights: InstructionWeights, /// The weights for each imported function a contract is allowed to call. - pub host_fn_weights: HostFnWeights, + pub(crate) host_fn_weights: HostFnWeights, } /// Describes the upper limits on various metrics. @@ -602,7 +606,21 @@ struct ScheduleRules<'a, T: Config> { } impl Schedule { - pub fn rules(&self, module: &elements::Module) -> impl rules::Rules + '_ { + /// Allow contracts to call `seal_println` in order to print messages to the console. + /// + /// This should only ever be activated in development chains. The printed messages + /// can be observed on the console by setting the environment variable + /// `RUST_LOG=runtime=debug` when running the node. + /// + /// # Note + /// + /// Is set to `false` by default. + pub fn enable_println(mut self, enable: bool) -> Self { + self.enable_println = enable; + self + } + + pub(crate) fn rules(&self, module: &elements::Module) -> impl rules::Rules + '_ { ScheduleRules { schedule: &self, params: module diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 970eec2003668..d78551f8f170e 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -19,23 +19,131 @@ use crate::{ exec::{AccountIdOf, StorageKey}, - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Config, TrieId, + BalanceOf, CodeHash, ContractInfoOf, Config, TrieId, AccountCounter, DeletionQueue, Error, weights::WeightInfo, }; -use codec::{Encode, Decode}; +use codec::{Codec, Encode, Decode}; use sp_std::prelude::*; -use sp_std::marker::PhantomData; +use sp_std::{marker::PhantomData, fmt::Debug}; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Bounded, Saturating, Zero}; +use sp_runtime::{ + RuntimeDebug, + traits::{Bounded, Saturating, Zero, Hash, Member, MaybeSerializeDeserialize}, +}; use sp_core::crypto::UncheckedFrom; use frame_support::{ dispatch::{DispatchError, 
DispatchResult}, - storage::child::{self, KillChildStorageResult}, + storage::child::{self, KillChildStorageResult, ChildInfo}, traits::Get, weights::Weight, }; +pub type AliveContractInfo = + RawAliveContractInfo, BalanceOf, ::BlockNumber>; +pub type TombstoneContractInfo = + RawTombstoneContractInfo<::Hash, ::Hashing>; + +/// Information for managing an account and its sub trie abstraction. +/// This is the required info to cache for an account +#[derive(Encode, Decode, RuntimeDebug)] +pub enum ContractInfo { + Alive(AliveContractInfo), + Tombstone(TombstoneContractInfo), +} + +impl ContractInfo { + /// If contract is alive then return some alive info + pub fn get_alive(self) -> Option> { + if let ContractInfo::Alive(alive) = self { + Some(alive) + } else { + None + } + } + /// If contract is alive then return some reference to alive info + pub fn as_alive(&self) -> Option<&AliveContractInfo> { + if let ContractInfo::Alive(ref alive) = self { + Some(alive) + } else { + None + } + } + + /// If contract is tombstone then return some tombstone info + pub fn get_tombstone(self) -> Option> { + if let ContractInfo::Tombstone(tombstone) = self { + Some(tombstone) + } else { + None + } + } +} + +/// Information for managing an account and its sub trie abstraction. +/// This is the required info to cache for an account. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct RawAliveContractInfo { + /// Unique ID for the subtree encoded as a bytes vector. + pub trie_id: TrieId, + /// The total number of bytes used by this contract. + /// + /// It is a sum of each key-value pair stored by this contract. + pub storage_size: u32, + /// The total number of key-value pairs in storage of this contract. + pub pair_count: u32, + /// The code associated with a given account. + pub code_hash: CodeHash, + /// Pay rent at most up to this value. + pub rent_allowance: Balance, + /// The amount of rent that was payed by the contract over its whole lifetime. 
+ /// + /// A restored contract starts with a value of zero just like a new contract. + pub rent_payed: Balance, + /// Last block rent has been payed. + pub deduct_block: BlockNumber, + /// Last block child storage has been written. + pub last_write: Option, + /// This field is reserved for future evolution of format. + pub _reserved: Option<()>, +} + +impl RawAliveContractInfo { + /// Associated child trie unique id is built from the hash part of the trie id. + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) + } +} + +/// Associated child trie unique id is built from the hash part of the trie id. +fn child_trie_info(trie_id: &[u8]) -> ChildInfo { + ChildInfo::new_default(trie_id) +} + +#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] +pub struct RawTombstoneContractInfo(H, PhantomData); + +impl RawTombstoneContractInfo +where + H: Member + MaybeSerializeDeserialize+ Debug + + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + + sp_std::hash::Hash + Codec, + Hasher: Hash, +{ + pub fn new(storage_root: &[u8], code_hash: H) -> Self { + let mut buf = Vec::new(); + storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); + buf.extend_from_slice(code_hash.as_ref()); + RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) + } +} + +impl From> for ContractInfo { + fn from(alive_info: AliveContractInfo) -> Self { + Self::Alive(alive_info) + } +} + /// An error that means that the account requested either doesn't exist or represents a tombstone /// account. #[cfg_attr(test, derive(PartialEq, Eq, Debug))] @@ -59,7 +167,7 @@ where /// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract /// doesn't store under the given `key` `None` is returned. 
pub fn read(trie_id: &TrieId, key: &StorageKey) -> Option> { - child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) + child::get_raw(&child_trie_info(&trie_id), &blake2_256(key)) } /// Update a storage entry into a contract's kv storage. @@ -87,7 +195,7 @@ where }; let hashed_key = blake2_256(key); - let child_trie_info = &crate::child_trie_info(&trie_id); + let child_trie_info = &child_trie_info(&trie_id); let opt_prev_len = child::len(&child_trie_info, &hashed_key); @@ -117,7 +225,7 @@ where .and_then(|val| val.checked_add(new_value_len)) .ok_or_else(|| Error::::StorageExhausted)?; - new_info.last_write = Some(>::block_number()); + new_info.last_write = Some(>::block_number()); >::insert(&account, ContractInfo::Alive(new_info)); // Finally, perform the change on the storage. @@ -176,7 +284,7 @@ where // We want to charge rent for the first block in advance. Therefore we // treat the contract as if it was created in the last block and then // charge rent for it during instantiation. - >::block_number().saturating_sub(1u32.into()), + >::block_number().saturating_sub(1u32.into()), rent_allowance: >::max_value(), rent_payed: >::zero(), pair_count: 0, @@ -257,7 +365,7 @@ where let trie = &mut queue[0]; let pair_count = trie.pair_count; let outcome = child::kill_storage( - &crate::child_trie_info(&trie.trie_id), + &child_trie_info(&trie.trie_id), Some(remaining_key_budget), ); if pair_count > remaining_key_budget { @@ -290,7 +398,6 @@ where /// This generator uses inner counter for account id and applies the hash over `AccountId + /// accountid_counter`. pub fn generate_trie_id(account_id: &AccountIdOf) -> TrieId { - use sp_runtime::traits::Hash; // Note that skipping a value due to error is not an issue here. // We only need uniqueness, not sequence. 
let new_seed = >::mutate(|v| { diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 2fa09e3405c1c..5fb637f3e9f18 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -16,8 +16,8 @@ // limitations under the License. use crate::{ - BalanceOf, ContractInfo, ContractInfoOf, Module, - RawAliveContractInfo, Config, Schedule, + BalanceOf, ContractInfo, ContractInfoOf, Pallet, + Config, Schedule, Error, storage::Storage, chain_extension::{ Result as ExtensionResult, Environment, ChainExtension, Ext, SysConfig, RetVal, @@ -26,6 +26,7 @@ use crate::{ exec::{AccountIdOf, Executable}, wasm::PrefabWasmModule, weights::WeightInfo, wasm::ReturnCode as RuntimeReturnCode, + storage::RawAliveContractInfo, }; use assert_matches::assert_matches; use codec::Encode; @@ -57,11 +58,11 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Randomness: pallet_randomness_collective_flip::{Module, Call, Storage}, - Contracts: pallet_contracts::{Module, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Randomness: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Contracts: pallet_contracts::{Pallet, Call, Config, Storage, Event}, } ); @@ -72,7 +73,7 @@ pub mod test_utils { ContractInfoOf, CodeHash, storage::Storage, exec::{StorageKey, AccountIdOf}, - Module as Contracts, + Pallet as Contracts, }; use frame_support::traits::Currency; @@ -246,7 +247,7 @@ parameter_types! 
{ pub const DepositPerContract: u64 = 8 * DepositPerStorageByte::get(); pub const DepositPerStorageByte: u64 = 10_000; pub const DepositPerStorageItem: u64 = 10_000; - pub RentFraction: Perbill = Perbill::from_rational_approximation(4u32, 10_000u32); + pub RentFraction: Perbill = Perbill::from_rational(4u32, 10_000u32); pub const SurchargeReward: u64 = 500_000; pub const MaxDepth: u32 = 100; pub const MaxValueSize: u32 = 16_384; @@ -457,7 +458,7 @@ fn instantiate_and_call_and_deposit_event() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); // Check at the end to get hash on error easily let creation = Contracts::instantiate_with_code( @@ -572,7 +573,7 @@ fn deposit_event_max_value_limit() { #[test] fn run_out_of_gas() { let (wasm, code_hash) = compile_module::("run_out_of_gas").unwrap(); - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); ExtBuilder::default() .existential_deposit(50) @@ -908,7 +909,7 @@ fn removals(trigger_call: impl Fn(AccountIdOf) -> bool) { .unwrap().get_alive().unwrap().rent_allowance; let balance = Balances::free_balance(&addr); - let subsistence_threshold = Module::::subsistence_threshold(); + let subsistence_threshold = Pallet::::subsistence_threshold(); // Trigger rent must have no effect assert!(!trigger_call(addr.clone())); @@ -997,7 +998,7 @@ fn removals(trigger_call: impl Fn(AccountIdOf) -> bool) { .build() .execute_with(|| { // Create - let subsistence_threshold = Module::::subsistence_threshold(); + let subsistence_threshold = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, subsistence_threshold * 1000); assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), @@ -1878,7 +1879,7 @@ fn crypto_hashes() { // We offset data in the contract tables by 1. 
let mut params = vec![(n + 1) as u8]; params.extend_from_slice(input); - let result = >::bare_call( + let result = >::bare_call( ALICE, addr.clone(), 0, @@ -1896,7 +1897,7 @@ fn crypto_hashes() { fn transfer_return_code() { let (wasm, code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -1943,7 +1944,7 @@ fn call_return_code() { let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); @@ -2036,7 +2037,7 @@ fn instantiate_return_code() { let (caller_code, caller_hash) = compile_module::("instantiate_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); let callee_hash = callee_hash.as_ref().to_vec(); @@ -2127,7 +2128,7 @@ fn instantiate_return_code() { fn disabled_chain_extension_wont_deploy() { let (code, _hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); 
let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); TestExtension::disable(); assert_err_ignore_postinfo!( @@ -2148,7 +2149,7 @@ fn disabled_chain_extension_wont_deploy() { fn disabled_chain_extension_errors_on_call() { let (code, hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( Contracts::instantiate_with_code( @@ -2179,7 +2180,7 @@ fn disabled_chain_extension_errors_on_call() { fn chain_extension_works() { let (code, hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( Contracts::instantiate_with_code( @@ -2248,7 +2249,7 @@ fn chain_extension_works() { fn lazy_removal_works() { let (code, hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -2308,7 +2309,7 @@ fn lazy_removal_partial_remove_works() { let mut ext = ExtBuilder::default().existential_deposit(50).build(); let trie = ext.execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -2389,7 +2390,7 @@ fn lazy_removal_partial_remove_works() { fn lazy_removal_does_no_run_on_full_block() { let (code, hash) = compile_module::("self_destruct").unwrap(); 
ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -2473,7 +2474,7 @@ fn lazy_removal_does_no_run_on_full_block() { fn lazy_removal_does_not_use_all_weight() { let (code, hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -2543,7 +2544,7 @@ fn lazy_removal_does_not_use_all_weight() { fn deletion_queue_full() { let (code, hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( @@ -2669,7 +2670,7 @@ fn refcounter() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); // Create two contracts with the same code and check that they do in fact share it. 
assert_ok!(Contracts::instantiate_with_code( @@ -2741,7 +2742,7 @@ fn reinstrument_does_charge() { let (wasm, code_hash) = compile_module::("return_with_data").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = Module::::subsistence_threshold(); + let subsistence = Pallet::::subsistence_threshold(); let zero = 0u32.to_le_bytes().encode(); let code_len = wasm.len() as u32; diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 0b2512f17f594..f9513afe51f4f 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -29,7 +29,7 @@ use crate::{ CodeHash, CodeStorage, PristineCode, Schedule, Config, Error, Weight, - wasm::{prepare, PrefabWasmModule}, Module as Contracts, Event, + wasm::{prepare, PrefabWasmModule}, Pallet as Contracts, Event, gas::{GasMeter, Token}, weights::WeightInfo, }; diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index 3c10d3225e430..cfb529d2932b6 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -43,21 +43,23 @@ macro_rules! gen_signature { macro_rules! 
gen_signature_dispatch { ( + $needle_module:ident, $needle_name:ident, $needle_sig:ident ; + $module:ident, $name:ident - ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* ) => { - if stringify!($name).as_bytes() == $needle_name { + ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* + ) => { + if stringify!($module).as_bytes() == $needle_module && stringify!($name).as_bytes() == $needle_name { let signature = gen_signature!( ( $( $params ),* ) $( -> $returns )* ); if $needle_sig == &signature { return true; } } else { - gen_signature_dispatch!($needle_name, $needle_sig ; $($rest)*); + gen_signature_dispatch!($needle_module, $needle_name, $needle_sig ; $($rest)*); } }; - ( $needle_name:ident, $needle_sig:ident ; ) => { - }; + ( $needle_module:ident, $needle_name:ident, $needle_sig:ident ; ) => {}; } /// Unmarshall arguments and then execute `body` expression and return its result. @@ -151,10 +153,11 @@ macro_rules! register_func { ( $reg_cb:ident, < E: $seal_ty:tt > ; ) => {}; ( $reg_cb:ident, < E: $seal_ty:tt > ; - $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* => $body:tt $($rest:tt)* ) => { $reg_cb( + stringify!($module).as_bytes(), stringify!($name).as_bytes(), { define_func!( @@ -176,14 +179,17 @@ macro_rules! register_func { /// and reject the code if any imported function has a mismatched signature. macro_rules! 
define_env { ( $init_name:ident , < E: $seal_ty:tt > , - $( $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( [$module:ident] $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* => $body:tt , )* ) => { pub struct $init_name; impl $crate::wasm::env_def::ImportSatisfyCheck for $init_name { - fn can_satisfy(name: &[u8], func_type: &parity_wasm::elements::FunctionType) -> bool { - gen_signature_dispatch!( name, func_type ; $( $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* ); + fn can_satisfy(module: &[u8], name: &[u8], func_type: &parity_wasm::elements::FunctionType) -> bool { + gen_signature_dispatch!( + module, name, func_type ; + $( $module, $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* + ); return false; } @@ -195,8 +201,12 @@ macro_rules! define_env { sp_core::crypto::UncheckedFrom<::Hash> + AsRef<[u8]> { - fn impls)>(f: &mut F) { - register_func!(f, < E: $seal_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); + fn impls)>(f: &mut F) { + register_func!( + f, + < E: $seal_ty > ; + $( $module $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* + ); } } }; @@ -327,7 +337,7 @@ mod tests { use crate::wasm::env_def::ImportSatisfyCheck; define_env!(Env, , - seal_gas( _ctx, amount: u32 ) => { + [seal0] seal_gas( _ctx, amount: u32 ) => { let amount = Weight::from(amount); if !amount.is_zero() { Ok(()) @@ -337,7 +347,11 @@ mod tests { }, ); - assert!(Env::can_satisfy(b"seal_gas", &FunctionType::new(vec![ValueType::I32], None))); - assert!(!Env::can_satisfy(b"not_exists", &FunctionType::new(vec![], None))); + assert!( + Env::can_satisfy(b"seal0", b"seal_gas",&FunctionType::new(vec![ValueType::I32], None)) + ); + assert!( + !Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], None)) + ); } } diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index 997ec29e028d5..6d33444b04dfe 100644 --- 
a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -74,7 +74,7 @@ pub type HostFunc = ) -> Result; pub trait FunctionImplProvider { - fn impls)>(f: &mut F); + fn impls)>(f: &mut F); } /// This trait can be used to check whether the host environment can satisfy @@ -83,5 +83,5 @@ pub trait ImportSatisfyCheck { /// Returns `true` if the host environment contains a function with /// the specified name and its type matches to the given type, or `false` /// otherwise. - fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; + fn can_satisfy(module: &[u8], name: &[u8], func_type: &FunctionType) -> bool; } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 6fc6bc1764e4a..f7fde5ba17861 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -192,8 +192,8 @@ where let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); imports.add_memory(self::prepare::IMPORT_MODULE_MEMORY, "memory", memory.clone()); - runtime::Env::impls(&mut |name, func_ptr| { - imports.add_host_func(self::prepare::IMPORT_MODULE_FN, name, func_ptr); + runtime::Env::impls(&mut |module, name, func_ptr| { + imports.add_host_func(module, name, func_ptr); }); let mut runtime = Runtime::new( @@ -245,8 +245,8 @@ where mod tests { use super::*; use crate::{ - CodeHash, BalanceOf, Error, Module as Contracts, - exec::{Ext, StorageKey, AccountIdOf, Executable, RentParams}, + CodeHash, BalanceOf, Error, Pallet as Contracts, + exec::{Ext, StorageKey, AccountIdOf, Executable, SeedOf, BlockNumberOf, RentParams}, gas::GasMeter, tests::{Test, Call, ALICE, BOB}, }; @@ -414,8 +414,8 @@ mod tests { fn tombstone_deposit(&self) -> u64 { 16 } - fn random(&self, subject: &[u8]) -> H256 { - H256::from_slice(subject) + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { + (H256::from_slice(subject), 42) } fn deposit_event(&mut self, topics: Vec, data: Vec) { self.events.push((topics, data)) @@ -515,7 
+515,7 @@ mod tests { fn tombstone_deposit(&self) -> u64 { (**self).tombstone_deposit() } - fn random(&self, subject: &[u8]) -> H256 { + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { (**self).random(subject) } fn deposit_event(&mut self, topics: Vec, data: Vec) { @@ -1531,6 +1531,85 @@ mod tests { ); } + const CODE_RANDOM_V1: &str = r#" +(module + (import "seal1" "seal_random" (func $seal_random (param i32 i32 i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0,128) is reserved for the result of PRNG. + + ;; the subject used for the PRNG. [128,160) + (data (i32.const 128) + "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" + "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" + ) + + ;; size of our buffer is 128 bytes + (data (i32.const 160) "\80") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; This stores the block random seed in the buffer + (call $seal_random + (i32.const 128) ;; Pointer in memory to the start of the subject buffer + (i32.const 32) ;; The subject buffer's length + (i32.const 0) ;; Pointer to the output buffer + (i32.const 160) ;; Pointer to the output buffer length + ) + + ;; assert len == 32 + (call $assert + (i32.eq + (i32.load (i32.const 160)) + (i32.const 40) + ) + ) + + ;; return the random data + (call $seal_return + (i32.const 0) + (i32.const 0) + (i32.const 40) + ) + ) + (func (export "deploy")) +) +"#; + + #[test] + fn random_v1() { + let mut gas_meter = GasMeter::new(GAS_LIMIT); + + let output = execute( + CODE_RANDOM_V1, + vec![], + MockExt::default(), + &mut gas_meter, + ).unwrap(); + + // The mock ext just returns the same data that was passed as the subject. 
+ assert_eq!( + output, + ExecReturnValue { + flags: ReturnFlags::empty(), + data: ( + hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"), + 42u64, + ).encode(), + }, + ); + } + + const CODE_DEPOSIT_EVENT: &str = r#" (module (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index caf6ef88c1ba0..15556b0c5cd06 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -28,11 +28,7 @@ use parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueTyp use sp_runtime::traits::Hash; use sp_std::prelude::*; -/// Currently, all imported functions must be located inside this module. We might support -/// additional modules for versioning later. -pub const IMPORT_MODULE_FN: &str = "seal0"; - -/// Imported memory must be located inside this module. The reason for that is that current +/// Imported memory must be located inside this module. The reason for hardcoding is that current /// compiler toolchains might not support specifying other modules than "env" for memory imports. 
pub const IMPORT_MODULE_MEMORY: &str = "env"; @@ -194,7 +190,7 @@ impl<'a, T: Config> ContractModule<'a, T> { let contract_module = pwasm_utils::inject_gas_counter( self.module, &gas_rules, - IMPORT_MODULE_FN + "seal0", ).map_err(|_| "gas instrumentation failed")?; Ok(ContractModule { module: contract_module, @@ -325,12 +321,7 @@ impl<'a, T: Config> ContractModule<'a, T> { let type_idx = match import.external() { &External::Table(_) => return Err("Cannot import tables"), &External::Global(_) => return Err("Cannot import globals"), - &External::Function(ref type_idx) => { - if import.module() != IMPORT_MODULE_FN { - return Err("Invalid module for imported function"); - } - type_idx - }, + &External::Function(ref type_idx) => type_idx, &External::Memory(ref memory_type) => { if import.module() != IMPORT_MODULE_MEMORY { return Err("Invalid module for imported memory"); @@ -363,7 +354,9 @@ impl<'a, T: Config> ContractModule<'a, T> { } if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) - || !C::can_satisfy(import.field().as_bytes(), func_ty) + || !C::can_satisfy( + import.module().as_bytes(), import.field().as_bytes(), func_ty, + ) { return Err("module imports a non-existent function"); } @@ -498,7 +491,7 @@ pub mod benchmarking { use parity_wasm::elements::FunctionType; impl ImportSatisfyCheck for () { - fn can_satisfy(_name: &[u8], _func_type: &FunctionType) -> bool { + fn can_satisfy(_module: &[u8], _name: &[u8], _func_type: &FunctionType) -> bool { true } } @@ -526,9 +519,8 @@ pub mod benchmarking { #[cfg(test)] mod tests { use super::*; - use crate::{exec::Ext, Limits}; + use crate::{exec::Ext, schedule::Limits}; use std::fmt; - use assert_matches::assert_matches; impl fmt::Debug for PrefabWasmModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -544,14 +536,17 @@ mod tests { // Define test environment for tests. We need ImportSatisfyCheck // implementation from it. So actual implementations doesn't matter. 
define_env!(Test, , - panic(_ctx) => { unreachable!(); }, + [seal0] panic(_ctx) => { unreachable!(); }, // gas is an implementation defined function and a contract can't import it. - gas(_ctx, _amount: u32) => { unreachable!(); }, + [seal0] gas(_ctx, _amount: u32) => { unreachable!(); }, + + [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, - nop(_ctx, _unused: u64) => { unreachable!(); }, + // new version of nop with other data type for argumebt + [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, - seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, + [seal0] seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, ); } @@ -572,7 +567,7 @@ mod tests { .. Default::default() }; let r = do_preparation::(wasm, &schedule); - assert_matches!(r, $($expected)*); + assert_matches::assert_matches!(r, $($expected)*); } }; } @@ -905,30 +900,16 @@ mod tests { Err("Invalid module for imported memory") ); - // functions are in "env" and not in "seal0" - prepare_test!(function_not_in_env, - r#" - (module - (import "env" "nop" (func (param i64))) - - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("Invalid module for imported function") - ); - - // functions are in "seal0" and not in in some arbitrary module - prepare_test!(function_not_arbitrary_module, + prepare_test!(function_in_other_module_works, r#" (module - (import "any_module" "nop" (func (param i64))) + (import "seal1" "nop" (func (param i32))) (func (export "call")) (func (export "deploy")) ) "#, - Err("Invalid module for imported function") + Ok(_) ); // wrong signature @@ -983,7 +964,7 @@ mod tests { let mut schedule = Schedule::default(); schedule.enable_println = true; let r = do_preparation::(wasm, &schedule); - assert_matches!(r, Ok(_)); + assert_matches::assert_matches!(r, Ok(_)); } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 2ceac1c51604e..f3757e4c2b10d 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ 
b/frame/contracts/src/wasm/runtime.rs @@ -18,10 +18,11 @@ //! Environment definition of the wasm smart-contract runtime. use crate::{ - HostFnWeights, Config, CodeHash, BalanceOf, Error, + Config, CodeHash, BalanceOf, Error, exec::{Ext, StorageKey, TopicOf}, gas::{GasMeter, Token, ChargedAmount}, wasm::env_def::ConvertibleToWasm, + schedule::HostFnWeights, }; use parity_wasm::elements::ValueType; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; @@ -629,7 +630,7 @@ define_env!(Env, , // This call is supposed to be called only by instrumentation injected code. // // - amount: How much gas is used. - gas(ctx, amount: u32) => { + [seal0] gas(ctx, amount: u32) => { ctx.charge_gas(RuntimeToken::MeteringBlock(amount))?; Ok(()) }, @@ -649,7 +650,7 @@ define_env!(Env, , // // - If value length exceeds the configured maximum value length of a storage entry. // - Upon trying to set an empty storage entry (value length is 0). - seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { + [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { ctx.charge_gas(RuntimeToken::SetStorage(value_len))?; if value_len > ctx.ext.max_value_size() { Err(Error::::ValueTooLarge)?; @@ -665,7 +666,7 @@ define_env!(Env, , // # Parameters // // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. 
- seal_clear_storage(ctx, key_ptr: u32) => { + [seal0] seal_clear_storage(ctx, key_ptr: u32) => { ctx.charge_gas(RuntimeToken::ClearStorage)?; let mut key: StorageKey = [0; 32]; ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; @@ -684,7 +685,7 @@ define_env!(Env, , // # Errors // // `ReturnCode::KeyNotFound` - seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + [seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { ctx.charge_gas(RuntimeToken::GetStorageBase)?; let mut key: StorageKey = [0; 32]; ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; @@ -713,7 +714,7 @@ define_env!(Env, , // // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` - seal_transfer( + [seal0] seal_transfer( ctx, account_ptr: u32, account_len: u32, @@ -767,7 +768,7 @@ define_env!(Env, , // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` // `ReturnCode::NotCallable` - seal_call( + [seal0] seal_call( ctx, callee_ptr: u32, callee_len: u32, @@ -868,7 +869,7 @@ define_env!(Env, , // `ReturnCode::TransferFailed` // `ReturnCode::NewContractNotFunded` // `ReturnCode::CodeNotFound` - seal_instantiate( + [seal0] seal_instantiate( ctx, code_hash_ptr: u32, code_hash_len: u32, @@ -950,7 +951,7 @@ define_env!(Env, , // - The contract is live i.e is already on the call stack. // - Failed to send the balance to the beneficiary. // - The deletion queue is full. - seal_terminate( + [seal0] seal_terminate( ctx, beneficiary_ptr: u32, beneficiary_len: u32 @@ -981,7 +982,7 @@ define_env!(Env, , // # Note // // This function can only be called once. Calling it multiple times will trigger a trap. 
- seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { @@ -1010,7 +1011,7 @@ define_env!(Env, , // --- msb --- // // Using a reserved bit triggers a trap. - seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { + [seal0] seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { ctx.charge_gas(RuntimeToken::Return(data_len))?; Err(TrapReason::Return(ReturnData { flags, @@ -1028,7 +1029,7 @@ define_env!(Env, , // If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the // address of the contract will be returned. The value is encoded as T::AccountId. - seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::Caller)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged @@ -1041,7 +1042,7 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::Address)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged @@ -1061,7 +1062,7 @@ define_env!(Env, , // // It is recommended to avoid specifying very small values for `gas` as the prices for a single // gas can be smaller than one. 
- seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::WeightToFee)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged @@ -1076,7 +1077,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as Gas. - seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::GasLeft)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged @@ -1091,7 +1092,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::Balance)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged @@ -1106,7 +1107,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::ValueTransferred)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged @@ -1121,7 +1122,43 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Hash. - seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + // + // # Deprecation + // + // This function is deprecated. 
Users should migrate to the version in the "seal1" module. + [seal0] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeToken::Random)?; + if subject_len > ctx.ext.schedule().limits.subject_len { + Err(Error::::RandomSubjectTooLong)?; + } + let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).0.encode(), false, already_charged + )?) + }, + + // Stores a random number for the current block and the given subject into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as (T::Hash, T::BlockNumber). + // + // # Changes from v0 + // + // In addition to the seed it returns the block number since which it was determinable + // by chain observers. + // + // # Note + // + // The returned seed should only be used to distinguish commitments made before + // the returned block number. If the block number is too early (i.e. commitments were + // made afterwards), then ensure no further commitments may be made and repeatedly + // call this on later blocks until the block number returned is later than the latest + // commitment. + [seal1] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { Err(Error::::RandomSubjectTooLong)?; @@ -1138,7 +1175,7 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. 
If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::Now)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged @@ -1148,7 +1185,7 @@ define_env!(Env, , // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. // // The data is encoded as T::Balance. - seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::MinimumBalance)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged @@ -1170,7 +1207,7 @@ define_env!(Env, , // a contract to leave a tombstone the balance of the contract must not go // below the sum of existential deposit and the tombstone deposit. The sum // is commonly referred as subsistence threshold in code. - seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::TombstoneDeposit)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, already_charged @@ -1208,7 +1245,7 @@ define_env!(Env, , // - Tombstone hashes do not match. // - The calling contract is already present on the call stack. // - The supplied code_hash does not exist on-chain. - seal_restore_to( + [seal0] seal_restore_to( ctx, dest_ptr: u32, dest_len: u32, @@ -1279,8 +1316,13 @@ define_env!(Env, , // - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. // - data_ptr - a pointer to a raw data buffer which will saved along the event. // - data_len - the length of the data buffer. 
- seal_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { - + [seal0] seal_deposit_event( + ctx, + topics_ptr: u32, + topics_len: u32, + data_ptr: u32, + data_len: u32 + ) => { fn has_duplicates(items: &mut Vec) -> bool { // # Warning // @@ -1336,7 +1378,7 @@ define_env!(Env, , // - value_ptr: a pointer to the buffer with value, how much to allow for rent // Should be decodable as a `T::Balance`. Traps otherwise. // - value_len: length of the value buffer. - seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { + [seal0] seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { ctx.charge_gas(RuntimeToken::SetRentAllowance)?; let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; @@ -1353,7 +1395,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::RentAllowance)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged @@ -1363,10 +1405,10 @@ define_env!(Env, , // Prints utf8 encoded string from the data buffer. // Only available on `--dev` chains. // This function may be removed at any time, superseded by a more general contract debugging feature. - seal_println(ctx, str_ptr: u32, str_len: u32) => { + [seal0] seal_println(ctx, str_ptr: u32, str_len: u32) => { let data = ctx.read_sandbox_memory(str_ptr, str_len)?; if let Ok(utf8) = core::str::from_utf8(&data) { - sp_runtime::print(utf8); + log::info!(target: "runtime::contracts", "seal_println: {}", utf8); } Ok(()) }, @@ -1377,7 +1419,7 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. 
If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::BlockNumber)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged @@ -1404,7 +1446,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + [seal0] seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeToken::HashSha256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?) }, @@ -1429,7 +1471,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + [seal0] seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeToken::HashKeccak256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?) }, @@ -1454,7 +1496,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + [seal0] seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeToken::HashBlake256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?) 
}, @@ -1479,7 +1521,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + [seal0] seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeToken::HashBlake128(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?) }, @@ -1495,7 +1537,7 @@ define_env!(Env, , // // If no chain extension exists the contract will trap with the `NoChainExtension` // module error. - seal_call_chain_extension( + [seal0] seal_call_chain_extension( ctx, func_id: u32, input_ptr: u32, @@ -1531,7 +1573,7 @@ define_env!(Env, , // The returned information was collected and cached when the current contract call // started execution. Any change to those values that happens due to actions of the // current call or contracts that are called by this contract are not considered. 
- seal_rent_params(ctx, out_ptr: u32, out_len_ptr: u32) => { + [seal0] seal_rent_params(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeToken::RentParams)?; Ok(ctx.write_sandbox_output( out_ptr, out_len_ptr, &ctx.ext.rent_params().encode(), false, already_charged diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 57447944d22a7..40bc99ec12e01 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -24,10 +24,10 @@ use frame_support::{ IterableStorageMap, traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, schedule::DispatchTime}, }; -use frame_system::{RawOrigin, Module as System, self, EventRecord}; +use frame_system::{RawOrigin, Pallet as System, self, EventRecord}; use sp_runtime::traits::{Bounded, One}; -use crate::Module as Democracy; +use crate::Pallet as Democracy; const SEED: u32 = 0; const MAX_REFERENDUMS: u32 = 99; diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index a7dd2d5bd9297..b3b37b0b34b68 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -17,8 +17,8 @@ //! # Democracy Pallet //! -//! - [`democracy::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -596,7 +596,7 @@ decl_module! { if let Some((until, _)) = >::get(proposal_hash) { ensure!( - >::block_number() >= until, + >::block_number() >= until, Error::::ProposalBlacklisted, ); } @@ -688,7 +688,7 @@ decl_module! { ensure!(!>::exists(), Error::::DuplicateProposal); if let Some((until, _)) = >::get(proposal_hash) { ensure!( - >::block_number() >= until, + >::block_number() >= until, Error::::ProposalBlacklisted, ); } @@ -776,7 +776,7 @@ decl_module! 
{ ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); >::kill(); - let now = >::block_number(); + let now = >::block_number(); Self::inject_referendum(now + voting_period, proposal_hash, threshold, delay); } @@ -806,7 +806,7 @@ decl_module! { .err().ok_or(Error::::AlreadyVetoed)?; existing_vetoers.insert(insert_position, who.clone()); - let until = >::block_number() + T::CooloffPeriod::get(); + let until = >::block_number() + T::CooloffPeriod::get(); >::insert(&proposal_hash, (until, existing_vetoers)); Self::deposit_event(RawEvent::Vetoed(who, proposal_hash, until)); @@ -1004,13 +1004,14 @@ decl_module! { _ => None, }).ok_or(Error::::PreimageMissing)?; - let now = >::block_number(); + let now = >::block_number(); let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); let additional = if who == provider { Zero::zero() } else { enactment }; ensure!(now >= since + voting + additional, Error::::TooEarly); ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - let _ = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); >::remove(&proposal_hash); Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, provider, deposit, who)); } @@ -1209,7 +1210,7 @@ impl Module { delay: T::BlockNumber ) -> ReferendumIndex { >::inject_referendum( - >::block_number() + T::VotingPeriod::get(), + >::block_number() + T::VotingPeriod::get(), proposal_hash, threshold, delay @@ -1308,7 +1309,7 @@ impl Module { Some(ReferendumInfo::Finished{end, approved}) => if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); - let now = system::Module::::block_number(); + let now = system::Pallet::::block_number(); if now < unlock_at { ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); 
prior.accumulate(unlock_at, balance) @@ -1435,7 +1436,7 @@ impl Module { } => { // remove any delegation votes to our current target. let votes = Self::reduce_upstream_delegation(&target, conviction.votes(balance)); - let now = system::Module::::block_number(); + let now = system::Pallet::::block_number(); let lock_periods = conviction.lock_periods().into(); prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); voting.set_common(delegations, prior); @@ -1455,7 +1456,7 @@ impl Module { /// a security hole) but may be reduced from what they are currently. fn update_lock(who: &T::AccountId) { let lock_needed = VotingOf::::mutate(who, |voting| { - voting.rejig(system::Module::::block_number()); + voting.rejig(system::Pallet::::block_number()); voting.locked_balance() }); if lock_needed.is_zero() { @@ -1541,7 +1542,8 @@ impl Module { let preimage = >::take(&proposal_hash); if let Some(PreimageStatus::Available { data, provider, deposit, .. }) = preimage { if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { - let _ = T::Currency::unreserve(&provider, deposit); + let err_amount = T::Currency::unreserve(&provider, deposit); + debug_assert!(err_amount.is_zero()); Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, provider, deposit)); let ok = proposal.dispatch(frame_system::RawOrigin::Root.into()).is_ok(); @@ -1716,7 +1718,7 @@ impl Module { .saturating_mul(T::PreimageByteDeposit::get()); T::Currency::reserve(&who, deposit)?; - let now = >::block_number(); + let now = >::block_number(); let a = PreimageStatus::Available { data: encoded_proposal, provider: who.clone(), @@ -1738,7 +1740,7 @@ impl Module { let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; - let now = >::block_number(); + let now = >::block_number(); let free = >::zero(); let a = PreimageStatus::Available { data: encoded_proposal, diff --git a/frame/democracy/src/tests.rs 
b/frame/democracy/src/tests.rs index 291cfa33b5224..57e845ace9f24 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -60,10 +60,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Scheduler: pallet_scheduler::{Module, Call, Storage, Config, Event}, - Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Config, Event}, + Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, } ); @@ -161,7 +161,7 @@ impl Contains for OneToFive { impl Config for Test { type Proposal = Call; type Event = Event; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type EnactmentPeriod = EnactmentPeriod; type LaunchPeriod = LaunchPeriod; type VotingPeriod = VotingPeriod; diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 1d63f9df40a25..4b5178faa8e86 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -26,7 +26,7 @@ sp-std = { version = "3.0.0", default-features = false, path = "../../primitives sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-election-providers = { version = "3.0.0", default-features = false, path = "../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", default-features = false, path = 
"../election-provider-support" } # Optional imports for benchmarking frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } @@ -41,9 +41,9 @@ substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } sp-io = { version = "3.0.0", path = "../../primitives/io" } sp-core = { version = "3.0.0", path = "../../primitives/core" } sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } -sp-election-providers = { version = "3.0.0", features = ["runtime-benchmarks"], path = "../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", features = ["runtime-benchmarks"], path = "../election-provider-support" } pallet-balances = { version = "3.0.0", path = "../balances" } -frame-benchmarking = { path = "../benchmarking" , version = "3.1.0"} +frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } [features] default = ["std"] @@ -60,7 +60,7 @@ std = [ "sp-runtime/std", "sp-npos-elections/std", "sp-arithmetic/std", - "sp-election-providers/std", + "frame-election-provider-support/std", "log/std", ] runtime-benchmarks = [ diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 74db28c6e3929..40c7e801ae78d 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,18 +18,17 @@ //! Two phase election pallet benchmarking. 
use super::*; -use crate::Module as MultiPhase; - -pub use frame_benchmarking::{account, benchmarks, whitelist_account, whitelisted_caller}; +use crate::Pallet as MultiPhase; +use frame_benchmarking::impl_benchmark_test_suite; use frame_support::{assert_ok, traits::OnInitialize}; use frame_system::RawOrigin; use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; -use sp_election_providers::Assignment; +use frame_election_provider_support::Assignment; use sp_arithmetic::traits::One; use sp_runtime::InnerOf; use sp_std::convert::TryInto; -const SEED: u32 = 0; +const SEED: u32 = 999; /// Creates a **valid** solution with exactly the given size. /// @@ -55,9 +54,9 @@ fn solution_with_size( // first generates random targets. let targets: Vec = - (0..size.targets).map(|i| account("Targets", i, SEED)).collect(); + (0..size.targets).map(|i| frame_benchmarking::account("Targets", i, SEED)).collect(); - let mut rng = SmallRng::seed_from_u64(999u64); + let mut rng = SmallRng::seed_from_u64(SEED as u64); // decide who are the winners. let winners = targets @@ -75,7 +74,7 @@ fn solution_with_size( .choose_multiple(&mut rng, >::LIMIT) .cloned() .collect::>(); - let voter = account::("Voter", i, SEED); + let voter = frame_benchmarking::account::("Voter", i, SEED); (voter, stake, winner_votes) }) .collect::>(); @@ -89,7 +88,7 @@ fn solution_with_size( .choose_multiple(&mut rng, >::LIMIT) .cloned() .collect::>(); - let voter = account::("Voter", i, SEED); + let voter = frame_benchmarking::account::("Voter", i, SEED); (voter, stake, votes) }) .collect::>(); @@ -109,13 +108,14 @@ fn solution_with_size( >::put(desired_targets); >::put(RoundSnapshot { voters: all_voters.clone(), targets: targets.clone() }); - // write the snapshot to staking or whoever is the data provider. - T::DataProvider::put_snapshot(all_voters.clone(), targets.clone()); + // write the snapshot to staking or whoever is the data provider, in case it is needed further + // down the road. 
+ T::DataProvider::put_snapshot(all_voters.clone(), targets.clone(), Some(stake)); let cache = helpers::generate_voter_cache::(&all_voters); let stake_of = helpers::stake_of_fn::(&all_voters, &cache); let voter_index = helpers::voter_index_fn::(&cache); - let target_index = helpers::target_index_fn_linear::(&targets); + let target_index = helpers::target_index_fn::(&targets); let voter_at = helpers::voter_at_fn::(&all_voters); let target_at = helpers::target_at_fn::(&targets); @@ -138,10 +138,12 @@ fn solution_with_size( >::from_assignment(assignments, &voter_index, &target_index).unwrap(); let score = compact.clone().score(&winners, stake_of, voter_at, target_at).unwrap(); let round = >::round(); + + assert!(score[0] > 0, "score is zero, this probably means that the stakes are not set."); RawSolution { compact, score, round } } -benchmarks! { +frame_benchmarking::benchmarks! { on_initialize_nothing { assert!(>::current_phase().is_off()); }: { @@ -157,7 +159,7 @@ benchmarks! { assert!(>::snapshot().is_none()); assert!(>::current_phase().is_off()); }: { - >::on_initialize_open_signed(); + >::on_initialize_open_signed().unwrap(); } verify { assert!(>::snapshot().is_some()); assert!(>::current_phase().is_signed()); @@ -167,7 +169,7 @@ benchmarks! { assert!(>::snapshot().is_none()); assert!(>::current_phase().is_off()); }: { - >::on_initialize_open_unsigned(true, true, 1u32.into()); + >::on_initialize_open_unsigned(true, true, 1u32.into()).unwrap(); } verify { assert!(>::snapshot().is_some()); assert!(>::current_phase().is_unsigned()); @@ -175,21 +177,51 @@ benchmarks! 
{ on_initialize_open_unsigned_without_snapshot { // need to assume signed phase was open before - >::on_initialize_open_signed(); + >::on_initialize_open_signed().unwrap(); assert!(>::snapshot().is_some()); assert!(>::current_phase().is_signed()); }: { - >::on_initialize_open_unsigned(false, true, 1u32.into()); + >::on_initialize_open_unsigned(false, true, 1u32.into()).unwrap(); } verify { assert!(>::snapshot().is_some()); assert!(>::current_phase().is_unsigned()); } + // a call to `::elect` where we only return the queued solution. + elect_queued { + // assume largest values for the election status. These will merely affect the decoding. + let v = T::BenchmarkingConfig::VOTERS[1]; + let t = T::BenchmarkingConfig::TARGETS[1]; + let a = T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + let d = T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + let witness = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(witness, a, d); + let ready_solution = + >::feasibility_check(raw_solution, ElectionCompute::Signed).unwrap(); + + // these are set by the `solution_with_size` function. + assert!(>::get().is_some()); + assert!(>::get().is_some()); + assert!(>::get().is_some()); + >::put(Phase::Signed); + // assume a queued solution is stored, regardless of where it comes from. + >::put(ready_solution); + }: { + let _ = as ElectionProvider>::elect(); + } verify { + assert!(>::queued_solution().is_none()); + assert!(>::get().is_none()); + assert!(>::get().is_none()); + assert!(>::get().is_none()); + assert_eq!(>::get(), >::Off); + } + #[extra] create_snapshot { assert!(>::snapshot().is_none()); }: { - >::create_snapshot() + >::create_snapshot().unwrap() } verify { assert!(>::snapshot().is_some()); } @@ -248,35 +280,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod test { - use super::*; - use crate::mock::*; - - #[test] - fn test_benchmarks() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_feasibility_check::()); - }); - - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_submit_unsigned::()); - }); - - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_on_initialize_open_unsigned_with_snapshot::()); - }); - - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_on_initialize_open_unsigned_without_snapshot::()); - }); - - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_on_initialize_nothing::()); - }); - - ExtBuilder::default().build_and_execute(|| { - assert_ok!(test_benchmark_create_snapshot::()); - }); - } -} +impl_benchmark_test_suite!( + MultiPhase, + crate::mock::ExtBuilder::default().build(), + crate::mock::Runtime, +); diff --git a/frame/election-provider-multi-phase/src/helpers.rs b/frame/election-provider-multi-phase/src/helpers.rs index dd97163d2859c..7894f71800fdb 100644 --- a/frame/election-provider-multi-phase/src/helpers.rs +++ b/frame/election-provider-multi-phase/src/helpers.rs @@ -18,14 +18,14 @@ //! Some helper functions/macros for this crate. use super::{Config, VoteWeight, CompactVoterIndexOf, CompactTargetIndexOf}; -use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, boxed::Box, prelude::*}; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; #[macro_export] macro_rules! log { ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { log::$level!( target: $crate::LOG_TARGET, - concat!("🗳 ", $pattern) $(, $values)* + concat!("[#{:?}] 🗳 ", $pattern), >::block_number() $(, $values)* ) }; } @@ -56,10 +56,10 @@ pub fn generate_voter_cache( /// The snapshot must be the same is the one used to create `cache`. 
pub fn voter_index_fn( cache: &BTreeMap, -) -> Box Option> + '_> { - Box::new(move |who| { +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { cache.get(who).and_then(|i| >>::try_into(*i).ok()) - }) + } } /// Same as [`voter_index_fn`], but the returning index is converted into usize, if possible. @@ -69,8 +69,8 @@ pub fn voter_index_fn( /// The snapshot must be the same is the one used to create `cache`. pub fn voter_index_fn_usize( cache: &BTreeMap, -) -> Box Option + '_> { - Box::new(move |who| cache.get(who).cloned()) +) -> impl Fn(&T::AccountId) -> Option + '_ { + move |who| cache.get(who).cloned() } /// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a @@ -79,64 +79,90 @@ pub fn voter_index_fn_usize( /// ## Warning /// /// Not meant to be used in production. +#[cfg(test)] pub fn voter_index_fn_linear( snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, -) -> Box Option> + '_> { - Box::new(move |who| { +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { snapshot .iter() .position(|(x, _, _)| x == who) .and_then(|i| >>::try_into(i).ok()) - }) + } } -/// Create a function the returns the index a targets in the snapshot. +/// Create a function the returns the index to a target in the snapshot. /// -/// The returning index type is the same as the one defined in `T::CompactSolution::Target`. +/// The returned index type is the same as the one defined in `T::CompactSolution::Target`. +/// +/// Note: to the extent possible, the returned function should be cached and reused. Producing that +/// function requires a `O(n log n)` data transform. Each invocation of that function completes +/// in `O(log n)`. 
+pub fn target_index_fn( + snapshot: &Vec, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + let cache: BTreeMap<_, _> = + snapshot.iter().enumerate().map(|(idx, account_id)| (account_id, idx)).collect(); + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Create a function the returns the index to a target in the snapshot. +/// +/// The returned index type is the same as the one defined in `T::CompactSolution::Target`. +/// +/// ## Warning +/// +/// Not meant to be used in production. +#[cfg(test)] pub fn target_index_fn_linear( snapshot: &Vec, -) -> Box Option> + '_> { - Box::new(move |who| { +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { snapshot .iter() .position(|x| x == who) .and_then(|i| >>::try_into(i).ok()) - }) + } } /// Create a function that can map a voter index ([`CompactVoterIndexOf`]) to the actual voter /// account using a linearly indexible snapshot. pub fn voter_at_fn( snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, -) -> Box) -> Option + '_> { - Box::new(move |i| { +) -> impl Fn(CompactVoterIndexOf) -> Option + '_ { + move |i| { as TryInto>::try_into(i) .ok() .and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned()) - }) + } } /// Create a function that can map a target index ([`CompactTargetIndexOf`]) to the actual target /// account using a linearly indexible snapshot. pub fn target_at_fn( snapshot: &Vec, -) -> Box) -> Option + '_> { - Box::new(move |i| { +) -> impl Fn(CompactTargetIndexOf) -> Option + '_ { + move |i| { as TryInto>::try_into(i) .ok() .and_then(|i| snapshot.get(i).cloned()) - }) + } } /// Create a function to get the stake of a voter. /// /// This is not optimized and uses a linear search. 
+#[cfg(test)] pub fn stake_of_fn_linear( snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, -) -> Box VoteWeight + '_> { - Box::new(move |who| { +) -> impl Fn(&T::AccountId) -> VoteWeight + '_ { + move |who| { snapshot.iter().find(|(x, _, _)| x == who).map(|(_, x, _)| *x).unwrap_or_default() - }) + } } /// Create a function to get the stake of a voter. @@ -148,12 +174,12 @@ pub fn stake_of_fn_linear( pub fn stake_of_fn<'a, T: Config>( snapshot: &'a Vec<(T::AccountId, VoteWeight, Vec)>, cache: &'a BTreeMap, -) -> Box VoteWeight + 'a> { - Box::new(move |who| { +) -> impl Fn(&T::AccountId) -> VoteWeight + 'a { + move |who| { if let Some(index) = cache.get(who) { snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default() } else { 0 } - }) + } } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 4ee6caae0a641..5545b3961124a 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -23,9 +23,10 @@ //! ## Phases //! //! The timeline of pallet is as follows. At each block, -//! [`sp_election_providers::ElectionDataProvider::next_election_prediction`] is used to estimate -//! the time remaining to the next call to [`sp_election_providers::ElectionProvider::elect`]. Based -//! on this, a phase is chosen. The timeline is as follows. +//! [`frame_election_provider_support::ElectionDataProvider::next_election_prediction`] is used to +//! estimate the time remaining to the next call to +//! [`frame_election_provider_support::ElectionProvider::elect`]. Based on this, a phase is chosen. +//! The timeline is as follows. //! //! ```ignore //! elect() @@ -149,7 +150,8 @@ //! are helpful for logging and are thus nested as: //! - [`ElectionError::Miner`]: wraps a [`unsigned::MinerError`]. //! - [`ElectionError::Feasibility`]: wraps a [`FeasibilityError`]. -//! - [`ElectionError::OnChainFallback`]: wraps a [`sp_election_providers::onchain::Error`]. +//! 
- [`ElectionError::OnChainFallback`]: wraps a +//! [`frame_election_provider_support::onchain::Error`]. //! //! Note that there could be an overlap between these sub-errors. For example, A //! `SnapshotUnavailable` can happen in both miner and feasibility check phase. @@ -184,10 +186,10 @@ //! //! **Recursive Fallback**: Currently, the fallback is a separate enum. A different and fancier way //! of doing this would be to have the fallback be another -//! [`sp_election_providers::ElectionProvider`]. In this case, this pallet can even have the -//! on-chain election provider as fallback, or special _noop_ fallback that simply returns an error, -//! thus replicating [`FallbackStrategy::Nothing`]. In this case, we won't need the additional -//! config OnChainAccuracy either. +//! [`frame_election_provider_support::ElectionProvider`]. In this case, this pallet can even have +//! the on-chain election provider as fallback, or special _noop_ fallback that simply returns an +//! error, thus replicating [`FallbackStrategy::Nothing`]. In this case, we won't need the +//! additional config OnChainAccuracy either. //! //! **Score based on (byte) size**: We should always prioritize small solutions over bigger ones, if //! there is a tie. Even more harsh should be to enforce the bound of the `reduce` algorithm. @@ -200,6 +202,15 @@ //! dependency from staking and the compact solution type. It should be generated at runtime, there //! it should be encoded how many votes each nominators have. Essentially translate //! to this pallet. +//! +//! **More accurate weight for error cases**: Both `ElectionDataProvider` and `ElectionProvider` +//! assume no weight is consumed in their functions, when operations fail with `Err`. This can +//! clearly be improved, but not a priority as we generally expect snapshot creation to fail only +//! due to extreme circumstances. +//! +//! **Take into account the encode/decode weight in benchmarks.** Currently, we only take into +//! 
account the weight of encode/decode in the `submit_unsigned` given its priority. Nonetheless, +//! all operations on the solution and the snapshot are worthy of taking this into account. #![cfg_attr(not(feature = "std"), no_std)] @@ -211,7 +222,7 @@ use frame_support::{ weights::Weight, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; -use sp_election_providers::{ElectionDataProvider, ElectionProvider, onchain}; +use frame_election_provider_support::{ElectionDataProvider, ElectionProvider, onchain}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, is_score_better, CompactSolution, ElectionScore, EvaluateSupport, PerThing128, Supports, VoteWeight, @@ -222,6 +233,7 @@ use sp_runtime::{ TransactionValidityError, ValidTransaction, }, DispatchError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, + traits::Bounded, }; use sp_std::prelude::*; use sp_arithmetic::{ @@ -261,6 +273,7 @@ struct OnChainConfig(sp_std::marker::PhantomData); impl onchain::Config for OnChainConfig { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; + type BlockWeights = T::BlockWeights; type Accuracy = T::OnChainAccuracy; type DataProvider = T::DataProvider; } @@ -368,11 +381,11 @@ impl Default for ElectionCompute { #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct RawSolution { /// Compact election edges. - compact: C, + pub compact: C, /// The _claimed_ score of the solution. - score: ElectionScore, + pub score: ElectionScore, /// The round at which this solution should be submitted. - round: u32, + pub round: u32, } impl Default for RawSolution { @@ -389,13 +402,13 @@ pub struct ReadySolution { /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - supports: Supports, + pub supports: Supports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. 
- score: ElectionScore, + pub score: ElectionScore, /// How this election was computed. - compute: ElectionCompute, + pub compute: ElectionCompute, } /// A snapshot of all the data that is needed for en entire round. They are provided by @@ -419,10 +432,10 @@ pub struct RoundSnapshot { pub struct SolutionOrSnapshotSize { /// The length of voters. #[codec(compact)] - voters: u32, + pub voters: u32, /// The length of targets. #[codec(compact)] - targets: u32, + pub targets: u32, } /// Internal errors of the pallet. @@ -436,6 +449,8 @@ pub enum ElectionError { Miner(unsigned::MinerError), /// An error in the on-chain fallback. OnChainFallback(onchain::Error), + /// An error happened in the data provider. + DataProvider(&'static str), /// No fallback is configured. This is a special case. NoFallbackConfigured, } @@ -563,17 +578,28 @@ pub mod pallet { match current_phase { Phase::Off if remaining <= signed_deadline && remaining > unsigned_deadline => { - Self::on_initialize_open_signed(); - log!(info, "Starting signed phase at #{:?} , round {}.", now, Self::round()); - T::WeightInfo::on_initialize_open_signed() + // NOTE: if signed-phase length is zero, second part of the if-condition fails. + match Self::on_initialize_open_signed() { + Ok(snap_weight) => { + log!(info, "Starting signed phase round {}.", Self::round()); + T::WeightInfo::on_initialize_open_signed().saturating_add(snap_weight) + } + Err(why) => { + // not much we can do about this at this point. + log!(warn, "failed to open signed phase due to {:?}", why); + T::WeightInfo::on_initialize_nothing() + // NOTE: ^^ The trait specifies that this is a noop in terms of weight + // in case of error. + } + } } Phase::Signed | Phase::Off - if remaining <= unsigned_deadline && remaining > 0u32.into() => + if remaining <= unsigned_deadline && remaining > Zero::zero() => { - let (need_snapshot, enabled, additional) = if current_phase == Phase::Signed { + // determine if followed by signed or not. 
+ let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed { // followed by a signed phase: close the signed phase, no need for snapshot. - // TWO_PHASE_NOTE: later on once we have signed phase, this should return - // something else. + // TODO: proper weight https://github.com/paritytech/substrate/pull/7910. (false, true, Weight::zero()) } else { // no signed phase: create a new snapshot, definitely `enable` the unsigned @@ -581,15 +607,25 @@ pub mod pallet { (true, true, Weight::zero()) }; - Self::on_initialize_open_unsigned(need_snapshot, enabled, now); - log!(info, "Starting unsigned phase({}) at #{:?}.", enabled, now); - - let base_weight = if need_snapshot { - T::WeightInfo::on_initialize_open_unsigned_with_snapshot() - } else { - T::WeightInfo::on_initialize_open_unsigned_without_snapshot() - }; - base_weight.saturating_add(additional) + match Self::on_initialize_open_unsigned(need_snapshot, enabled, now) { + Ok(snap_weight) => { + log!(info, "Starting unsigned phase({}).", enabled); + let base_weight = if need_snapshot { + T::WeightInfo::on_initialize_open_unsigned_with_snapshot() + } else { + T::WeightInfo::on_initialize_open_unsigned_without_snapshot() + }; + + base_weight.saturating_add(snap_weight).saturating_add(signed_weight) + } + Err(why) => { + // not much we can do about this at this point. + log!(warn, "failed to open unsigned phase due to {:?}", why); + T::WeightInfo::on_initialize_nothing() + // NOTE: ^^ The trait specifies that this is a noop in terms of weight + // in case of error. 
+ } + } } _ => T::WeightInfo::on_initialize_nothing(), } @@ -601,7 +637,7 @@ pub mod pallet { match Self::try_acquire_offchain_lock(n) { Ok(_) => { let outcome = Self::mine_check_and_submit().map_err(ElectionError::from); - log!(info, "miner exeuction done: {:?}", outcome); + log!(info, "mine_check_and_submit execution done: {:?}", outcome); } Err(why) => log!(warn, "denied offchain worker: {:?}", why), } @@ -642,6 +678,16 @@ pub mod pallet { let _: UpperOf> = maximum_chain_accuracy .iter() .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); + + // We only accept data provider whose maximum votes per voter matches our + // `T::CompactSolution`'s `LIMIT`. + // + // NOTE that this pallet does not really need to enforce this in runtime. The compact + // solution cannot represent any voters more than `LIMIT` anyhow. + assert_eq!( + >::MAXIMUM_VOTES_PER_VOTER, + as CompactSolution>::LIMIT as u32, + ); } } @@ -838,32 +884,41 @@ pub mod pallet { } impl Pallet { - /// Logic for `::on_initialize` when signed phase is being opened. + /// Logic for [`::on_initialize`] when signed phase is being opened. /// /// This is decoupled for easy weight calculation. - pub(crate) fn on_initialize_open_signed() { + /// + /// Returns `Ok(snapshot_weight)` if success, where `snapshot_weight` is the weight that + /// needs to be recorded for the creation of snapshot. + pub(crate) fn on_initialize_open_signed() -> Result { + let weight = Self::create_snapshot()?; >::put(Phase::Signed); - Self::create_snapshot(); Self::deposit_event(Event::SignedPhaseStarted(Self::round())); + Ok(weight.saturating_add(T::DbWeight::get().writes(1))) } - /// Logic for `>::on_initialize` when unsigned phase is being opened. + /// Logic for [`>::on_initialize`] when unsigned phase is being opened. + /// + /// This is decoupled for easy weight calculation. /// - /// This is decoupled for easy weight calculation. 
Note that the default weight benchmark of - /// this function will assume an empty signed queue for `finalize_signed_phase`. + /// Returns `Ok(snapshot_weight)` if success, where `snapshot_weight` is the weight that + /// needs to be recorded for the creation of snapshot. pub(crate) fn on_initialize_open_unsigned( need_snapshot: bool, enabled: bool, now: T::BlockNumber, - ) { - if need_snapshot { + ) -> Result { + let weight = if need_snapshot { // if not being followed by a signed phase, then create the snapshots. debug_assert!(Self::snapshot().is_none()); - Self::create_snapshot(); - } + Self::create_snapshot()? + } else { + 0 + }; >::put(Phase::Unsigned((enabled, now))); Self::deposit_event(Event::UnsignedPhaseStarted(Self::round())); + Ok(weight.saturating_add(T::DbWeight::get().writes(1))) } /// Creates the snapshot. Writes new data to: @@ -871,18 +926,33 @@ impl Pallet { /// 1. [`SnapshotMetadata`] /// 2. [`RoundSnapshot`] /// 3. [`DesiredTargets`] - pub(crate) fn create_snapshot() { - // if any of them don't exist, create all of them. This is a bit conservative. - let targets = T::DataProvider::targets(); - let voters = T::DataProvider::voters(); - let desired_targets = T::DataProvider::desired_targets(); + /// + /// Returns `Ok(consumed_weight)` if operation is okay. 
+ pub(crate) fn create_snapshot() -> Result { + let target_limit = >::max_value().saturated_into::(); + let voter_limit = >::max_value().saturated_into::(); + + let (targets, w1) = + T::DataProvider::targets(Some(target_limit)).map_err(ElectionError::DataProvider)?; + let (voters, w2) = + T::DataProvider::voters(Some(voter_limit)).map_err(ElectionError::DataProvider)?; + let (desired_targets, w3) = + T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; + + // defensive-only + if targets.len() > target_limit || voters.len() > voter_limit { + debug_assert!(false, "Snapshot limit has not been respected."); + return Err(ElectionError::DataProvider("Snapshot too big for submission.")); + } + // only write snapshot if all existed. >::put(SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32, }); >::put(desired_targets); >::put(RoundSnapshot { voters, targets }); + Ok(w1.saturating_add(w2).saturating_add(w3).saturating_add(T::DbWeight::get().writes(3))) } /// Kill everything created by [`Pallet::create_snapshot`]. @@ -998,7 +1068,7 @@ impl Pallet { } /// On-chain fallback of election. - fn onchain_fallback() -> Result, ElectionError> { + fn onchain_fallback() -> Result<(Supports, Weight), ElectionError> { > as ElectionProvider< T::AccountId, T::BlockNumber, @@ -1006,25 +1076,33 @@ impl Pallet { .map_err(Into::into) } - fn do_elect() -> Result, ElectionError> { + fn do_elect() -> Result<(Supports, Weight), ElectionError> { >::take() .map_or_else( || match T::Fallback::get() { FallbackStrategy::OnChain => Self::onchain_fallback() - .map(|r| (r, ElectionCompute::OnChain)) + .map(|(s, w)| (s, w, ElectionCompute::OnChain)) .map_err(Into::into), FallbackStrategy::Nothing => Err(ElectionError::NoFallbackConfigured), }, - |ReadySolution { supports, compute, .. }| Ok((supports, compute)), + |ReadySolution { supports, compute, .. 
}| Ok(( + supports, + T::WeightInfo::elect_queued(), + compute + )), ) - .map(|(supports, compute)| { + .map(|(supports, weight, compute)| { Self::deposit_event(Event::ElectionFinalized(Some(compute))); - log!(info, "Finalized election round with compute {:?}.", compute); - supports + if Self::round() != 1 { + log!(info, "Finalized election round with compute {:?}.", compute); + } + (supports, weight) }) .map_err(|err| { Self::deposit_event(Event::ElectionFinalized(None)); - log!(warn, "Failed to finalize election round. reason {:?}", err); + if Self::round() != 1 { + log!(warn, "Failed to finalize election round. reason {:?}", err); + } err }) } @@ -1034,10 +1112,11 @@ impl ElectionProvider for Pallet { type Error = ElectionError; type DataProvider = T::DataProvider; - fn elect() -> Result, Self::Error> { - let outcome = Self::do_elect(); + fn elect() -> Result<(Supports, Weight), Self::Error> { + let outcome_and_weight = Self::do_elect(); + // IMPORTANT: regardless of if election was `Ok` or `Err`, we shall do some cleanup. 
Self::post_elect(); - outcome + outcome_and_weight } } @@ -1128,13 +1207,13 @@ mod feasibility_check { .compact .votes1 .iter_mut() - .filter(|(_, t)| *t == 3u16) + .filter(|(_, t)| *t == TargetIndex::from(3u16)) .for_each(|(_, t)| *t += 1); solution.compact.votes2.iter_mut().for_each(|(_, (t0, _), t1)| { - if *t0 == 3u16 { + if *t0 == TargetIndex::from(3u16) { *t0 += 1 }; - if *t1 == 3u16 { + if *t1 == TargetIndex::from(3u16) { *t1 += 1 }; }); @@ -1162,7 +1241,7 @@ mod feasibility_check { .compact .votes1 .iter_mut() - .filter(|(v, _)| *v == 7u32) + .filter(|(v, _)| *v == VoterIndex::from(7u32)) .map(|(v, _)| *v = 8) .count() > 0 ); @@ -1225,7 +1304,7 @@ mod feasibility_check { #[cfg(test)] mod tests { use super::{mock::*, Event, *}; - use sp_election_providers::ElectionProvider; + use frame_election_provider_support::ElectionProvider; use sp_npos_elections::Support; #[test] @@ -1397,7 +1476,7 @@ mod tests { #[test] fn fallback_strategy_works() { - ExtBuilder::default().fallabck(FallbackStrategy::OnChain).build_and_execute(|| { + ExtBuilder::default().fallback(FallbackStrategy::OnChain).build_and_execute(|| { roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); @@ -1405,7 +1484,7 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // zilch solutions thus far. - let supports = MultiPhase::elect().unwrap(); + let (supports, _) = MultiPhase::elect().unwrap(); assert_eq!( supports, @@ -1416,7 +1495,7 @@ mod tests { ) }); - ExtBuilder::default().fallabck(FallbackStrategy::Nothing).build_and_execute(|| { + ExtBuilder::default().fallback(FallbackStrategy::Nothing).build_and_execute(|| { roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); @@ -1428,6 +1507,26 @@ mod tests { }) } + #[test] + fn snapshot_creation_fails_if_too_big() { + ExtBuilder::default().build_and_execute(|| { + Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); + + // signed phase failed to open. 
+ roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // unsigned phase failed to open. + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // on-chain backup works though. + roll_to(29); + let (supports, _) = MultiPhase::elect().unwrap(); + assert!(supports.len() > 0); + }) + } + #[test] fn number_of_voters_allowed_2sec_block() { // Just a rough estimate with the substrate weights. diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index e7a2924fd2aa6..cebd5cf06e692 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -31,7 +31,7 @@ use sp_core::{ }, H256, }; -use sp_election_providers::ElectionDataProvider; +use frame_election_provider_support::{ElectionDataProvider, data_provider}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, to_without_backing, CompactSolution, ElectionResult, EvaluateSupport, @@ -52,18 +52,20 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event, Config}, - Balances: pallet_balances::{Module, Call, Event, Config}, - MultiPhase: multi_phase::{Module, Call, Event}, + System: frame_system::{Pallet, Call, Event, Config}, + Balances: pallet_balances::{Pallet, Call, Event, Config}, + MultiPhase: multi_phase::{Pallet, Call, Event}, } ); pub(crate) type Balance = u64; pub(crate) type AccountId = u64; +pub(crate) type VoterIndex = u32; +pub(crate) type TargetIndex = u16; sp_npos_elections::generate_solution_type!( #[compact] - pub struct TestCompact::(16) + pub struct TestCompact::(16) ); /// All events of this pallet. 
@@ -239,6 +241,13 @@ impl multi_phase::weights::WeightInfo for DualMockWeightInfo { <() as multi_phase::weights::WeightInfo>::on_initialize_open_unsigned_without_snapshot() } } + fn elect_queued() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::elect_queued() + } + } fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { if MockWeightInfo::get() { // 10 base @@ -291,18 +300,44 @@ pub struct ExtBuilder {} pub struct StakingMock; impl ElectionDataProvider for StakingMock { - fn targets() -> Vec { - Targets::get() + const MAXIMUM_VOTES_PER_VOTER: u32 = ::LIMIT as u32; + fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { + let targets = Targets::get(); + + if maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { + return Err("Targets too big"); + } + + Ok((targets, 0)) } - fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { - Voters::get() + + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)> { + let voters = Voters::get(); + if maybe_max_len.map_or(false, |max_len| voters.len() > max_len) { + return Err("Voters too big"); + } + + Ok((voters, 0)) } - fn desired_targets() -> u32 { - DesiredTargets::get() + fn desired_targets() -> data_provider::Result<(u32, Weight)> { + Ok((DesiredTargets::get(), 0)) } + fn next_election_prediction(now: u64) -> u64 { now + EpochLength::get() - now % EpochLength::get() } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn put_snapshot( + voters: Vec<(AccountId, VoteWeight, Vec)>, + targets: Vec, + _target_stake: Option, + ) { + Targets::set(targets); + Voters::set(voters); + } } impl ExtBuilder { @@ -319,7 +354,7 @@ impl ExtBuilder { ::set(unsigned); self } - pub fn fallabck(self, fallback: FallbackStrategy) -> Self { + pub fn fallback(self, fallback: FallbackStrategy) -> Self { ::set(fallback); self } diff --git a/frame/election-provider-multi-phase/src/unsigned.rs 
b/frame/election-provider-multi-phase/src/unsigned.rs index 2039e5d9f0754..280907ac5439a 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -66,8 +66,17 @@ impl Pallet { let iters = Self::get_balancing_iters(); // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. let (raw_solution, witness) = Self::mine_and_check(iters)?; + let score = raw_solution.score.clone(); + + let call: >>::OverarchingCall = + Call::submit_unsigned(raw_solution, witness).into(); + log!( + info, + "mined a solution with score {:?} and size {}", + score, + call.using_encoded(|b| b.len()) + ); - let call = Call::submit_unsigned(raw_solution, witness).into(); SubmitTransaction::>::submit_unsigned_transaction(call) .map_err(|_| MinerError::PoolSubmissionFailed) } @@ -136,7 +145,7 @@ impl Pallet { // closures. let cache = helpers::generate_voter_cache::(&voters); let voter_index = helpers::voter_index_fn::(&cache); - let target_index = helpers::target_index_fn_linear::(&targets); + let target_index = helpers::target_index_fn::(&targets); let voter_at = helpers::voter_at_fn::(&voters); let target_at = helpers::target_at_fn::(&targets); let stake_of = helpers::stake_of_fn::(&voters, &cache); @@ -159,12 +168,16 @@ impl Pallet { size, T::MinerMaxWeight::get(), ); + log!( debug, - "miner: current compact solution voters = {}, maximum_allowed = {}", + "initial solution voters = {}, snapshot = {:?}, maximum_allowed(capped) = {}", compact.voter_count(), + size, maximum_allowed_voters, ); + + // trim weight. let compact = Self::trim_compact(maximum_allowed_voters, compact, &voter_index)?; // re-calc score. 
@@ -243,10 +256,12 @@ impl Pallet { } } + log!(debug, "removed {} voter to meet the max weight limit.", to_remove); Ok(compact) } _ => { // nada, return as-is + log!(debug, "didn't remove any voter for weight limits."); Ok(compact) } } @@ -289,6 +304,7 @@ impl Pallet { // First binary-search the right amount of voters let mut step = voters / 2; let mut current_weight = weight_with(voters); + while step > 0 { match next_voters(current_weight, voters, step) { // proceed with the binary search @@ -315,13 +331,14 @@ impl Pallet { voters -= 1; } + let final_decision = voters.min(size.voters); debug_assert!( - weight_with(voters.min(size.voters)) <= max_weight, + weight_with(final_decision) <= max_weight, "weight_with({}) <= {}", - voters.min(size.voters), + final_decision, max_weight, ); - voters.min(size.voters) + final_decision } /// Checks if an execution of the offchain worker is permitted at the given block number, or @@ -413,6 +430,9 @@ mod max_weight { fn on_initialize_open_unsigned_with_snapshot() -> Weight { unreachable!() } + fn elect_queued() -> Weight { + 0 + } fn on_initialize_open_unsigned_without_snapshot() -> Weight { unreachable!() } @@ -487,7 +507,7 @@ mod tests { }; use frame_support::{dispatch::Dispatchable, traits::OffchainWorker}; use mock::Call as OuterCall; - use sp_election_providers::Assignment; + use frame_election_provider_support::Assignment; use sp_runtime::{traits::ValidateUnsigned, PerU16}; #[test] @@ -725,7 +745,6 @@ mod tests { roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); - // mine seq_phragmen solution with 2 iters. 
assert_eq!( MultiPhase::mine_check_and_submit().unwrap_err(), MinerError::PreDispatchChecksFailed, @@ -832,7 +851,7 @@ mod tests { } #[test] - fn ocw_only_runs_when_signed_open_now() { + fn ocw_only_runs_when_unsigned_open_now() { let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { roll_to(25); diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 276bba330d24c..3d3a5cede3293 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-02-12, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-03-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -48,6 +48,7 @@ pub trait WeightInfo { fn on_initialize_open_signed() -> Weight; fn on_initialize_open_unsigned_with_snapshot() -> Weight; fn on_initialize_open_unsigned_without_snapshot() -> Weight; + fn elect_queued() -> Weight; fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight; } @@ -56,47 +57,50 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize_nothing() -> Weight { - (23_401_000 as Weight) + (22_730_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) } fn on_initialize_open_signed() -> Weight { - (79_260_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (112_051_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> 
Weight { - (77_745_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (112_165_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (21_764_000 as Weight) + (21_039_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + fn elect_queued() -> Weight { + (7_362_949_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) + } + fn submit_unsigned(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 23_000 - .saturating_add((4_171_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 78_000 - .saturating_add((229_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 23_000 - .saturating_add((13_661_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 117_000 - .saturating_add((4_499_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 21_000 + .saturating_add((3_933_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 21_000 + .saturating_add((13_520_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 107_000 + .saturating_add((2_880_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 12_000 - .saturating_add((4_232_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 42_000 - .saturating_add((636_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 12_000 - .saturating_add((10_294_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 64_000 - 
.saturating_add((4_428_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 10_000 + .saturating_add((4_069_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 36_000 + .saturating_add((503_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 10_000 + .saturating_add((10_000_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 54_000 + .saturating_add((3_734_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) } } @@ -104,47 +108,50 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize_nothing() -> Weight { - (23_401_000 as Weight) + (22_730_000 as Weight) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) } fn on_initialize_open_signed() -> Weight { - (79_260_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (112_051_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> Weight { - (77_745_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (112_165_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (21_764_000 as Weight) + (21_039_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + fn elect_queued() -> Weight { + (7_362_949_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + } + fn submit_unsigned(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 23_000 - .saturating_add((4_171_000 as Weight).saturating_mul(v as 
Weight)) - // Standard Error: 78_000 - .saturating_add((229_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 23_000 - .saturating_add((13_661_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 117_000 - .saturating_add((4_499_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 21_000 + .saturating_add((3_933_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 21_000 + .saturating_add((13_520_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 107_000 + .saturating_add((2_880_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 12_000 - .saturating_add((4_232_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 42_000 - .saturating_add((636_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 12_000 - .saturating_add((10_294_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 64_000 - .saturating_add((4_428_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 10_000 + .saturating_add((4_069_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 36_000 + .saturating_add((503_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 10_000 + .saturating_add((10_000_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 54_000 + .saturating_add((3_734_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) } } diff --git a/primitives/election-providers/Cargo.toml b/frame/election-provider-support/Cargo.toml similarity index 57% rename from primitives/election-providers/Cargo.toml rename to frame/election-provider-support/Cargo.toml index cf12dce8098d7..b360cd89eb57b 100644 --- a/primitives/election-providers/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -1,12 
+1,12 @@ [package] -name = "sp-election-providers" +name = "frame-election-provider-support" version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "Primitive election providers" +description = "election provider supporting traits" readme = "README.md" [package.metadata.docs.rs] @@ -14,20 +14,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "3.0.0", default-features = false, path = "../std" } -sp-arithmetic = { version = "3.0.0", default-features = false, path = "../arithmetic" } -sp-npos-elections = { version = "3.0.0", default-features = false, path = "../npos-elections" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-npos-elections = { version = "3.0.0", path = "../npos-elections" } -sp-runtime = { version = "3.0.0", path = "../runtime" } +sp-npos-elections = { version = "3.0.0", path = "../../primitives/npos-elections" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } [features] default = ["std"] -runtime-benchmarks = [] std = [ "codec/std", "sp-std/std", "sp-npos-elections/std", "sp-arithmetic/std", + "frame-support/std", + "frame-system/std", ] +runtime-benchmarks = [] diff --git a/primitives/election-providers/src/lib.rs b/frame/election-provider-support/src/lib.rs similarity index 72% rename from 
primitives/election-providers/src/lib.rs rename to frame/election-provider-support/src/lib.rs index 73ea58c176b26..b846460e71f8a 100644 --- a/primitives/election-providers/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -78,14 +78,15 @@ //! ## Example //! //! ```rust -//! # use sp_election_providers::*; +//! # use frame_election_provider_support::{*, data_provider}; //! # use sp_npos_elections::{Support, Assignment}; +//! # use frame_support::weights::Weight; //! //! type AccountId = u64; //! type Balance = u64; //! type BlockNumber = u32; //! -//! mod data_provider { +//! mod data_provider_mod { //! use super::*; //! //! pub trait Config: Sized { @@ -99,14 +100,17 @@ //! pub struct Module(std::marker::PhantomData); //! //! impl ElectionDataProvider for Module { -//! fn desired_targets() -> u32 { -//! 1 +//! const MAXIMUM_VOTES_PER_VOTER: u32 = 1; +//! fn desired_targets() -> data_provider::Result<(u32, Weight)> { +//! Ok((1, 0)) //! } -//! fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { -//! Default::default() +//! fn voters(maybe_max_len: Option) +//! -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)> +//! { +//! Ok((Default::default(), 0)) //! } -//! fn targets() -> Vec { -//! vec![10, 20, 30] +//! fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { +//! Ok((vec![10, 20, 30], 0)) //! } //! fn next_election_prediction(now: BlockNumber) -> BlockNumber { //! 0 @@ -125,29 +129,30 @@ //! } //! //! impl ElectionProvider for GenericElectionProvider { -//! type Error = (); +//! type Error = &'static str; //! type DataProvider = T::DataProvider; //! -//! fn elect() -> Result, Self::Error> { -//! Self::DataProvider::targets() -//! .first() -//! .map(|winner| vec![(*winner, Support::default())]) -//! .ok_or(()) +//! fn elect() -> Result<(Supports, Weight), Self::Error> { +//! Self::DataProvider::targets(None) +//! .map_err(|_| "failed to elect") +//! .map(|(t, weight)| { +//! 
(vec![(t[0], Support::default())], weight) +//! }) //! } //! } //! } //! //! mod runtime { //! use super::generic_election_provider; -//! use super::data_provider; +//! use super::data_provider_mod; //! use super::AccountId; //! //! struct Runtime; //! impl generic_election_provider::Config for Runtime { -//! type DataProvider = data_provider::Module; +//! type DataProvider = data_provider_mod::Module; //! } //! -//! impl data_provider::Config for Runtime { +//! impl data_provider_mod::Config for Runtime { //! type ElectionProvider = generic_election_provider::GenericElectionProvider; //! } //! @@ -160,23 +165,49 @@ pub mod onchain; use sp_std::{prelude::*, fmt::Debug}; +use frame_support::weights::Weight; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; -pub use sp_npos_elections::{Assignment, ExtendedBalance, PerThing128, Supports, VoteWeight}; +pub use sp_npos_elections::{ + Assignment, ExtendedBalance, PerThing128, Supports, Support, VoteWeight +}; + +/// Types that are used by the data provider trait. +pub mod data_provider { + /// Alias for the result type of the election data provider. + pub type Result = sp_std::result::Result; +} /// Something that can provide the data to an [`ElectionProvider`]. pub trait ElectionDataProvider { + /// Maximum number of votes per voter that this data provider is providing. + const MAXIMUM_VOTES_PER_VOTER: u32; + /// All possible targets for the election, i.e. the candidates. - fn targets() -> Vec; + /// + /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items + /// long. + /// + /// It is assumed that this function will only consume a notable amount of weight, when it + /// returns `Ok(_)`. + fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)>; /// All possible voters for the election. /// /// Note that if a notion of self-vote exists, it should be represented here. 
- fn voters() -> Vec<(AccountId, VoteWeight, Vec)>; + /// + /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items + /// long. + /// + /// It is assumed that this function will only consume a notable amount of weight, when it + /// returns `Ok(_)`. + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)>; /// The number of targets to elect. - fn desired_targets() -> u32; + fn desired_targets() -> data_provider::Result<(u32, Weight)>; /// Provide a best effort prediction about when the next election is about to happen. /// @@ -192,20 +223,24 @@ pub trait ElectionDataProvider { fn put_snapshot( _voters: Vec<(AccountId, VoteWeight, Vec)>, _targets: Vec, + _target_stake: Option, ) { } } #[cfg(feature = "std")] impl ElectionDataProvider for () { - fn targets() -> Vec { - Default::default() + const MAXIMUM_VOTES_PER_VOTER: u32 = 0; + fn targets(_maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { + Ok(Default::default()) } - fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { - Default::default() + fn voters( + _maybe_max_len: Option, + ) -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)> { + Ok(Default::default()) } - fn desired_targets() -> u32 { - Default::default() + fn desired_targets() -> data_provider::Result<(u32, Weight)> { + Ok(Default::default()) } fn next_election_prediction(now: BlockNumber) -> BlockNumber { now @@ -226,8 +261,8 @@ pub trait ElectionProvider { /// Elect a new set of winners. /// - /// The result is returned in a target major format, namely as vector of supports. - fn elect() -> Result, Self::Error>; + /// The result is returned in a target major format, namely as vector of supports. 
+ fn elect() -> Result<(Supports, Weight), Self::Error>; } #[cfg(feature = "std")] @@ -235,7 +270,7 @@ impl ElectionProvider for () { type Error = &'static str; type DataProvider = (); - fn elect() -> Result, Self::Error> { + fn elect() -> Result<(Supports, Weight), Self::Error> { Err("<() as ElectionProvider> cannot do anything.") } } diff --git a/primitives/election-providers/src/onchain.rs b/frame/election-provider-support/src/onchain.rs similarity index 69% rename from primitives/election-providers/src/onchain.rs rename to frame/election-provider-support/src/onchain.rs index b50dff2ff17d9..e034a9c36a8ac 100644 --- a/primitives/election-providers/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,12 +20,15 @@ use crate::{ElectionDataProvider, ElectionProvider}; use sp_npos_elections::*; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; +use frame_support::{traits::Get, weights::Weight}; /// Errors of the on-chain election. #[derive(Eq, PartialEq, Debug)] pub enum Error { /// An internal error in the NPoS elections crate. NposElections(sp_npos_elections::Error), + /// Errors from the data provider. + DataProvider(&'static str), } impl From for Error { @@ -40,13 +43,20 @@ impl From for Error { /// /// ### Warning /// -/// This can be very expensive to run frequently on-chain. Use with care. +/// This can be very expensive to run frequently on-chain. Use with care. Moreover, this +/// implementation ignores the additional data of the election data provider and gives no insight on +/// how much weight was consumed. +/// +/// Finally, this implementation does not impose any limits on the number of voters and targets that +/// are provided. pub struct OnChainSequentialPhragmen(PhantomData); /// Configuration trait of [`OnChainSequentialPhragmen`]. /// /// Note that this is similar to a pallet traits, but [`OnChainSequentialPhragmen`] is not a pallet. pub trait Config { + /// The block limits. 
+ type BlockWeights: Get; /// The account identifier type. type AccountId: IdentifierT; /// The block number type. @@ -61,10 +71,11 @@ impl ElectionProvider for OnChainSequen type Error = Error; type DataProvider = T::DataProvider; - fn elect() -> Result, Self::Error> { - let voters = Self::DataProvider::voters(); - let targets = Self::DataProvider::targets(); - let desired_targets = Self::DataProvider::desired_targets() as usize; + fn elect() -> Result<(Supports, Weight), Self::Error> { + let (voters, _) = Self::DataProvider::voters(None).map_err(Error::DataProvider)?; + let (targets, _) = Self::DataProvider::targets(None).map_err(Error::DataProvider)?; + let (desired_targets, _) = + Self::DataProvider::desired_targets().map_err(Error::DataProvider)?; let mut stake_map: BTreeMap = BTreeMap::new(); @@ -77,13 +88,13 @@ impl ElectionProvider for OnChainSequen }; let ElectionResult { winners, assignments } = - seq_phragmen::<_, T::Accuracy>(desired_targets, targets, voters, None) + seq_phragmen::<_, T::Accuracy>(desired_targets as usize, targets, voters, None) .map_err(Error::from)?; let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; let winners = to_without_backing(winners); - to_supports(&winners, &staked).map_err(Error::from) + to_supports(&winners, &staked).map_err(Error::from).map(|s| (s, T::BlockWeights::get().max_block)) } } @@ -92,12 +103,13 @@ mod tests { use super::*; use sp_npos_elections::Support; use sp_runtime::Perbill; + use frame_support::weights::Weight; type AccountId = u64; type BlockNumber = u32; - struct Runtime; impl Config for Runtime { + type BlockWeights = (); type AccountId = AccountId; type BlockNumber = BlockNumber; type Accuracy = Perbill; @@ -108,24 +120,24 @@ mod tests { mod mock_data_provider { use super::*; + use crate::data_provider; pub struct DataProvider; impl ElectionDataProvider for DataProvider { - fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { - vec![ - (1, 10, vec![10, 20]), - (2, 20, vec![30, 
20]), - (3, 30, vec![10, 30]), - ] + const MAXIMUM_VOTES_PER_VOTER: u32 = 2; + fn voters( + _: Option, + ) -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)> { + Ok((vec![(1, 10, vec![10, 20]), (2, 20, vec![30, 20]), (3, 30, vec![10, 30])], 0)) } - fn targets() -> Vec { - vec![10, 20, 30] + fn targets(_: Option) -> data_provider::Result<(Vec, Weight)> { + Ok((vec![10, 20, 30], 0)) } - fn desired_targets() -> u32 { - 2 + fn desired_targets() -> data_provider::Result<(u32, Weight)> { + Ok((2, 0)) } fn next_election_prediction(_: BlockNumber) -> BlockNumber { @@ -137,7 +149,7 @@ mod tests { #[test] fn onchain_seq_phragmen_works() { assert_eq!( - OnChainPhragmen::elect().unwrap(), + OnChainPhragmen::elect().unwrap().0, vec![ ( 10, diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 779570ca633ee..7f0a6afb2b106 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -92,9 +92,9 @@ //! //! ### Module Information //! -//! - [`election_sp_phragmen::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] #![cfg_attr(not(feature = "std"), no_std)] @@ -1084,7 +1084,7 @@ mod tests { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); - type SS58Prefix = (); + type SS58Prefix = (); } parameter_types! 
{ @@ -1096,7 +1096,7 @@ mod tests { type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; + type AccountStore = frame_system::Pallet; type MaxLocks = (); type WeightInfo = (); } @@ -1187,9 +1187,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Event, Config}, - Elections: elections_phragmen::{Module, Call, Event, Config}, + System: frame_system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Event, Config}, + Elections: elections_phragmen::{Pallet, Call, Event, Config}, } ); @@ -1308,7 +1308,6 @@ mod tests { } fn has_lock(who: &u64) -> u64 { - dbg!(Balances::locks(who)); Balances::locks(who) .get(0) .cloned() diff --git a/frame/elections-phragmen/src/migrations_3_0_0.rs b/frame/elections-phragmen/src/migrations_3_0_0.rs index 8adc4c1a69f7c..8afc9ed66920b 100644 --- a/frame/elections-phragmen/src/migrations_3_0_0.rs +++ b/frame/elections-phragmen/src/migrations_3_0_0.rs @@ -21,7 +21,6 @@ use codec::{Encode, Decode, FullCodec}; use sp_std::prelude::*; use frame_support::{ RuntimeDebug, weights::Weight, Twox64Concat, - storage::types::{StorageMap, StorageValue}, traits::{GetPalletVersion, PalletVersion}, }; @@ -51,38 +50,21 @@ pub trait V2ToV3 { type Balance: 'static + FullCodec + Copy; } -struct __Candidates; -impl frame_support::traits::StorageInstance for __Candidates { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Candidates"; -} - -#[allow(type_alias_bounds)] -type Candidates = StorageValue<__Candidates, Vec<(T::AccountId, T::Balance)>>; - -struct __Members; -impl frame_support::traits::StorageInstance for __Members { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Members"; -} -#[allow(type_alias_bounds)] -type Members = 
StorageValue<__Members, Vec>>; - -struct __RunnersUp; -impl frame_support::traits::StorageInstance for __RunnersUp { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "RunnersUp"; -} -#[allow(type_alias_bounds)] -type RunnersUp = StorageValue<__RunnersUp, Vec>>; - -struct __Voting; -impl frame_support::traits::StorageInstance for __Voting { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Voting"; -} -#[allow(type_alias_bounds)] -type Voting = StorageMap<__Voting, Twox64Concat, T::AccountId, Voter>; +frame_support::generate_storage_alias!( + PhragmenElection, Candidates => Value> +); +frame_support::generate_storage_alias!( + PhragmenElection, Members => Value>> +); +frame_support::generate_storage_alias!( + PhragmenElection, RunnersUp => Value>> +); +frame_support::generate_storage_alias!( + PhragmenElection, Voting => Map< + (Twox64Concat, T::AccountId), + Voter + > +); /// Apply all of the migrations from 2_0_0 to 3_0_0. /// diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 6eaa2dfad3732..d6b68bbf5a043 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -713,7 +713,7 @@ decl_event!( BadReaperSlashed(AccountId), /// A tally (for approval votes of \[seats\]) has started. TallyStarted(u32), - /// A tally (for approval votes of seat(s)) has ended (with one or more new members). + /// A tally (for approval votes of seat(s)) has ended (with one or more new members). 
/// \[incoming, outgoing\] TallyFinalized(Vec, Vec), } @@ -759,7 +759,7 @@ impl Module { // if there's a tally in progress, then next tally can begin immediately afterwards (tally_end, c.len() - leavers.len() + comers as usize, comers) } else { - (>::block_number(), c.len(), 0) + (>::block_number(), c.len(), 0) }; if count < desired_seats as usize { Some(next_possible) @@ -914,7 +914,7 @@ impl Module { fn start_tally() { let members = Self::members(); let desired_seats = Self::desired_seats() as usize; - let number = >::block_number(); + let number = >::block_number(); let expiring = members.iter().take_while(|i| i.1 <= number).map(|i| i.0.clone()).collect::>(); let retaining_seats = members.len() - expiring.len(); @@ -942,7 +942,7 @@ impl Module { .ok_or("finalize can only be called after a tally is started.")?; let leaderboard: Vec<(BalanceOf, T::AccountId)> = >::take() .unwrap_or_default(); - let new_expiry = >::block_number() + Self::term_duration(); + let new_expiry = >::block_number() + Self::term_duration(); // return bond to winners. 
let candidacy_bond = T::CandidacyBond::get(); diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index 31d3f5a1c28a5..287eaa27b196a 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -135,9 +135,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Event, Config}, - Elections: elections::{Module, Call, Event, Config}, + System: system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Event, Config}, + Elections: elections::{Pallet, Call, Event, Config}, } ); diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 7707e7d61e629..e91b374adbe1d 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -49,8 +49,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Example: example_offchain_worker::{Module, Call, Storage, Event, ValidateUnsigned}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Example: example_offchain_worker::{Pallet, Call, Storage, Event, ValidateUnsigned}, } ); diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index da2892c67d42a..e82d75e632068 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -33,8 +33,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Example: pallet_example_parallel::{Module, Call, Storage}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Example: pallet_example_parallel::{Pallet, Call, Storage}, } ); diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 
de741294b9c16..e24616bc84cf3 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -21,8 +21,8 @@ pallet-balances = { version = "3.0.0", default-features = false, path = "../bala sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } - frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } @@ -30,14 +30,15 @@ sp-core = { version = "3.0.0", path = "../../primitives/core", default-features [features] default = ["std"] std = [ - "serde", "codec/std", - "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", + "serde", "sp-io/std", + "sp-runtime/std", "sp-std/std" ] runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/example/src/benchmarking.rs b/frame/example/src/benchmarking.rs new file mode 100644 index 0000000000000..64602ca41cee9 --- /dev/null +++ b/frame/example/src/benchmarking.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for pallet-example. + +#![cfg(feature = "runtime-benchmarks")] + +use crate::*; +use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; +use frame_system::RawOrigin; + +// To actually run this benchmark on pallet-example, we need to put this pallet into the +// runtime and compile it with `runtime-benchmarks` feature. The detail procedures are +// documented at: +// https://substrate.dev/docs/en/knowledgebase/runtime/benchmarking#how-to-benchmark +// +// The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. +// The exact command of how the estimate generated is printed at the top of the file. + +// Details on using the benchmarks macro can be seen at: +// https://substrate.dev/rustdocs/v3.0.0/frame_benchmarking/macro.benchmarks.html +benchmarks!{ + // This will measure the execution time of `set_dummy` for b in [1..1000] range. + set_dummy_benchmark { + // This is the benchmark setup phase + let b in 1 .. 1000; + }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call + verify { + // This is the optional benchmark verification phase, asserting certain states. + assert_eq!(Pallet::::dummy(), Some(b.into())) + } + + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. + // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same + // as the extrinsic call. `_(...)` is used to represent the extrinsic name. 
+ // The benchmark verification phase is omitted. + accumulate_dummy { + let b in 1 .. 1000; + // The caller account is whitelisted for DB reads/write by the benchmarking macro. + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of sorting a vector. + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + // The benchmark execution phase could also be a closure with custom code + m.sort(); + } +} + +// This line generates test cases for benchmarking, and could be run by: +// `cargo test -p pallet-example --all-features`, you will see an additional line of: +// `test benchmarking::benchmark_tests::test_benchmarks ... ok` in the result. +// +// The line generates three steps per benchmark, with repeat=1 and the three steps are +// [low, mid, high] of the range. +impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 7a537f4522abf..fd1bc292ac8aa 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -255,28 +255,45 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::marker::PhantomData; +use sp_std::{ + prelude::*, + marker::PhantomData +}; use frame_support::{ dispatch::DispatchResult, traits::IsSubType, weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays}, }; -use sp_std::prelude::*; use frame_system::{ensure_signed}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{ - SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, + SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, Saturating }, transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, }, }; +use log::info; + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod tests; + +mod benchmarking; +pub mod weights; +pub use weights::*; + +/// A type alias for the balance type from this pallet's point of view. +type BalanceOf = ::Balance; +const MILLICENTS: u32 = 1_000_000_000; // A custom weight calculator tailored for the dispatch call `set_dummy()`. This actually examines // the arguments and makes a decision based upon them. // // The `WeightData` trait has access to the arguments of the dispatch that it wants to assign a -// weight to. Nonetheless, the trait itself can not make any assumptions about what the generic type +// weight to. Nonetheless, the trait itself cannot make any assumptions about what the generic type // of the arguments (`T`) is. Based on our needs, we could replace `T` with a more concrete type // while implementing the trait. The `pallet::weight` expects whatever implements `WeighData` to // replace `T` with a tuple of the dispatch arguments. This is exactly how we will craft the @@ -286,13 +303,22 @@ use sp_runtime::{ // - The final weight of each dispatch is calculated as the argument of the call multiplied by the // parameter given to the `WeightForSetDummy`'s constructor. 
// - assigns a dispatch class `operational` if the argument of the call is more than 1000. +// +// More information can be read at: +// - https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight +// - https://substrate.dev/docs/en/knowledgebase/runtime/fees#default-weight-annotations +// +// Manually configuring weight is an advanced operation and what you really need may well be +// fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. struct WeightForSetDummy(BalanceOf); impl WeighData<(&BalanceOf,)> for WeightForSetDummy { fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { let multiplier = self.0; - (*target.0 * multiplier).saturated_into::() + // *target.0 is the amount passed into the extrinsic + let cents = *target.0 / >::from(MILLICENTS); + (cents * multiplier).saturated_into::() } } @@ -312,12 +338,6 @@ impl PaysFee<(&BalanceOf,)> for WeightForSetDummy } } -/// A type alias for the balance type from this pallet's point of view. -type BalanceOf = ::Balance; - -// Re-export pallet items so that they can be accessed from the crate namespace. -pub use pallet::*; - // Definition of the pallet logic, to be aggregated at runtime definition through // `construct_runtime`. #[frame_support::pallet] @@ -334,8 +354,15 @@ pub mod pallet { /// `frame_system::Config` should always be included. #[pallet::config] pub trait Config: pallet_balances::Config + frame_system::Config { + // Setting a constant config parameter from the runtime + #[pallet::constant] + type MagicNumber: Get; + /// The overarching event type. type Event: From> + IsType<::Event>; + + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; } // Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and @@ -354,14 +381,12 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. 
- 0 } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. fn on_finalize(_n: T::BlockNumber) { - // We just kill our dummy storage item. - >::kill(); + // Perform necessary data/state clean up here. } // A runtime code run after every block and have access to extended set of APIs. @@ -370,7 +395,9 @@ pub mod pallet { fn offchain_worker(_n: T::BlockNumber) { // We don't do anything here. // but we could dispatch extrinsic (transaction/unsigned/inherent) using - // sp_io::submit_extrinsic + // sp_io::submit_extrinsic. + // To see example on offchain worker, please refer to example-offchain-worker pallet + // accompanied in this repository. } } @@ -455,11 +482,16 @@ pub mod pallet { // difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the // call. A higher weight means a larger transaction (less of which can be placed in a // single block). - #[pallet::weight(0)] + // + // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the benchmark + // toolchain. + #[pallet::weight( + ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) + )] pub(super) fn accumulate_dummy( origin: OriginFor, increase_by: T::Balance - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. let _sender = ensure_signed(origin)?; @@ -478,15 +510,16 @@ pub mod pallet { // Here's the new one of read and then modify the value. >::mutate(|dummy| { - let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + // Using `saturating_add` instead of a regular `+` to avoid overflowing + let new_dummy = dummy.map_or(increase_by, |d| d.saturating_add(increase_by)); *dummy = Some(new_dummy); }); // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(Event::Dummy(increase_by)); + Self::deposit_event(Event::AccumulateDummy(increase_by)); // All good, no refund. 
- Ok(().into()) + Ok(()) } /// A privileged call; in this case it resets our dummy value to something new. @@ -496,17 +529,28 @@ pub mod pallet { // calls to be executed - we don't need to care why. Because it's privileged, we can // assume it's a one-off operation and substantial processing/storage/memory can be used // without worrying about gameability or attack scenarios. + // + // The weight for this extrinsic we use our own weight object `WeightForSetDummy` to determine + // its weight #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] - fn set_dummy( + pub(super) fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; + + // Print out log or debug message in the console via log::{error, warn, info, debug, trace}, + // accepting format strings similar to `println!`. + // https://substrate.dev/rustdocs/v3.0.0/log/index.html + info!("New value is now: {:?}", new_value); + // Put the new value into storage. >::put(new_value); + Self::deposit_event(Event::SetDummy(new_value)); + // All good, no refund. - Ok(().into()) + Ok(()) } } @@ -520,7 +564,9 @@ pub mod pallet { pub enum Event { // Just a normal `enum`, here's a dummy event to ensure it compiles. /// Dummy event, just here so there's a generic type that's used. - Dummy(BalanceOf), + AccumulateDummy(BalanceOf), + SetDummy(BalanceOf), + SetBar(T::AccountId, BalanceOf), } // pallet::storage attributes allow for type-safe usage of the Substrate storage database, @@ -545,14 +591,13 @@ pub mod pallet { // A map that has enumerable entries. #[pallet::storage] #[pallet::getter(fn bar)] - pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>; // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. 
#[pallet::storage] #[pallet::getter(fn foo)] pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; - // The genesis config type. #[pallet::genesis_config] pub struct GenesisConfig { @@ -600,7 +645,7 @@ impl Pallet { let prev = >::get(); // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. let result = >::mutate(|foo| { - *foo = *foo + increase_by; + *foo = foo.saturating_add(increase_by); *foo }); assert!(prev + increase_by == result); @@ -640,11 +685,11 @@ impl Pallet { // types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and // `node-template` for an example of this. -/// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the -/// priority and prints some log. -/// -/// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No -/// particular reason why, just to demonstrate the power of signed extensions. +// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the +// priority and prints some log. +// +// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No +// particular reason why, just to demonstrate the power of signed extensions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct WatchDummy(PhantomData); @@ -691,201 +736,3 @@ where } } } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::*; - use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; - use frame_system::RawOrigin; - - benchmarks!{ - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. - accumulate_dummy { - let b in 1 .. 1000; - let caller = account("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..1000] range. - set_dummy { - let b in 1 .. 
1000; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..10] range. - another_set_dummy { - let b in 1 .. 10; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of sorting a vector. - sort_vector { - let x in 0 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { - m.sort(); - } - } - - impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); -} - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{ - assert_ok, parameter_types, - weights::{DispatchInfo, GetDispatchInfo}, traits::{OnInitialize, OnFinalize} - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - testing::Header, BuildStorage, - traits::{BlakeTwo256, IdentityLookup}, - }; - // Reexport crate as its pallet name for construct_runtime. - use crate as pallet_example; - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - - // For testing the pallet, we construct a mock runtime. - frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Example: pallet_example::{Module, Call, Storage, Config, Event}, - } - ); - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); - } - impl frame_system::Config for Test { - type BaseCallFilter = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Config for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - } - impl Config for Test { - type Event = Event; - } - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { - // We use default for brevity, but you can configure as desired if needed. - frame_system: Default::default(), - pallet_balances: Default::default(), - pallet_example: pallet_example::GenesisConfig { - dummy: 42, - // we configure the map with (key, value) pairs. - bar: vec![(1, 2), (2, 3)], - foo: 24, - }, - }.build_storage().unwrap(); - t.into() - } - - #[test] - fn it_works_for_optional_value() { - new_test_ext().execute_with(|| { - // Check that GenesisBuilder works properly. - assert_eq!(Example::dummy(), Some(42)); - - // Check that accumulate works when we have Some value in Dummy already. 
- assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); - assert_eq!(Example::dummy(), Some(69)); - - // Check that finalizing the block removes Dummy from storage. - >::on_finalize(1); - assert_eq!(Example::dummy(), None); - - // Check that accumulate works when we Dummy has None in it. - >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); - assert_eq!(Example::dummy(), Some(42)); - }); - } - - #[test] - fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); - assert_eq!(Example::foo(), 25); - }); - } - - #[test] - fn signed_ext_watch_dummy_works() { - new_test_ext().execute_with(|| { - let call = >::set_dummy(10).into(); - let info = DispatchInfo::default(); - - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 150) - .unwrap() - .priority, - u64::max_value(), - ); - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), - InvalidTransaction::ExhaustsResources.into(), - ); - }) - } - - #[test] - fn weights_work() { - // must have a defined weight. - let default_call = >::accumulate_dummy(10); - let info = default_call.get_dispatch_info(); - // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert_eq!(info.weight, 0); - - // must have a custom weight of `100 * arg = 2000` - let custom_call = >::set_dummy(20); - let info = custom_call.get_dispatch_info(); - assert_eq!(info.weight, 2000); - } -} diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs new file mode 100644 index 0000000000000..ed866344a4b18 --- /dev/null +++ b/frame/example/src/tests.rs @@ -0,0 +1,189 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example. + +use crate::*; +use frame_support::{ + assert_ok, parameter_types, + weights::{DispatchInfo, GetDispatchInfo}, traits::OnInitialize +}; +use sp_core::H256; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use sp_runtime::{ + testing::Header, BuildStorage, + traits::{BlakeTwo256, IdentityLookup}, +}; +// Reexport crate as its pallet name for construct_runtime. +use crate as pallet_example; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example::{Pallet, Call, Storage, Config, Event}, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const MagicNumber: u64 = 1_000_000_000; +} +impl Config for Test { + type MagicNumber = MagicNumber; + type Event = Event; + type WeightInfo = (); +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = GenesisConfig { + // We use default for brevity, but you can configure as desired if needed. + frame_system: Default::default(), + pallet_balances: Default::default(), + pallet_example: pallet_example::GenesisConfig { + dummy: 42, + // we configure the map with (key, value) pairs. + bar: vec![(1, 2), (2, 3)], + foo: 24, + }, + }.build_storage().unwrap(); + t.into() +} + +#[test] +fn it_works_for_optional_value() { + new_test_ext().execute_with(|| { + // Check that GenesisBuilder works properly. 
+ let val1 = 42; + let val2 = 27; + assert_eq!(Example::dummy(), Some(val1)); + + // Check that accumulate works when we have Some value in Dummy already. + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val2)); + assert_eq!(Example::dummy(), Some(val1 + val2)); + + // Check that accumulate works when we Dummy has None in it. + >::on_initialize(2); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val1)); + assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); + }); +} + +#[test] +fn it_works_for_default_value() { + new_test_ext().execute_with(|| { + assert_eq!(Example::foo(), 24); + assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); + assert_eq!(Example::foo(), 25); + }); +} + +#[test] +fn set_dummy_works() { + new_test_ext().execute_with(|| { + let test_val = 133; + assert_ok!(Example::set_dummy(Origin::root(), test_val.into())); + assert_eq!(Example::dummy(), Some(test_val)); + }); +} + +#[test] +fn signed_ext_watch_dummy_works() { + new_test_ext().execute_with(|| { + let call = >::set_dummy(10).into(); + let info = DispatchInfo::default(); + + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, &info, 150) + .unwrap() + .priority, + u64::max_value(), + ); + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + InvalidTransaction::ExhaustsResources.into(), + ); + }) +} + +#[test] +fn weights_work() { + // must have a defined weight. + let default_call = >::accumulate_dummy(10); + let info1 = default_call.get_dispatch_info(); + // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` + assert!(info1.weight > 0); + + + // `set_dummy` is simpler than `accumulate_dummy`, and the weight + // should be less. 
+ let custom_call = >::set_dummy(20); + let info2 = custom_call.get_dispatch_info(); + assert!(info1.weight > info2.weight); +} diff --git a/frame/example/src/weights.rs b/frame/example/src/weights.rs new file mode 100644 index 0000000000000..db6491335c76f --- /dev/null +++ b/frame/example/src/weights.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_example +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain +// dev +// --execution +// wasm +// --wasm-execution +// compiled +// --pallet +// pallet_example +// --extrinsic +// * +// --steps +// 100 +// --repeat +// 10 +// --raw +// --output +// ./ +// --template +// ./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_example. 
+pub trait WeightInfo { + fn set_dummy_benchmark(b: u32, ) -> Weight; + fn accumulate_dummy(b: u32, ) -> Weight; + fn sort_vector(x: u32, ) -> Weight; +} + +/// Weights for pallet_example using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn accumulate_dummy(b: u32, ) -> Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn sort_vector(x: u32, ) -> Weight { + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn accumulate_dummy(b: u32, ) -> Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn sort_vector(x: u32, ) -> Weight { + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + } +} diff --git a/frame/executive/README.md b/frame/executive/README.md index 183e32b2ff8a9..ae3bbf1a9d994 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -35,7 +35,7 @@ The default Substrate node template declares the [`Executive`](https://docs.rs/f ```rust # /// Executive: handles dispatch to the various modules. 
-pub type Executive = executive::Executive; +pub type Executive = executive::Executive; ``` ### Custom `OnRuntimeUpgrade` logic @@ -54,7 +54,7 @@ impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { } } -pub type Executive = executive::Executive; +pub type Executive = executive::Executive; ``` License: Apache-2.0 diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index f48fda4841d26..277b20cf20bfa 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -59,7 +59,7 @@ //! # type Context = frame_system::ChainContext; //! # pub type Block = generic::Block; //! # pub type Balances = u64; -//! # pub type AllModules = u64; +//! # pub type AllPallets = u64; //! # pub enum Runtime {}; //! # use sp_runtime::transaction_validity::{ //! # TransactionValidity, UnknownTransaction, TransactionSource, @@ -73,7 +73,7 @@ //! # } //! # } //! /// Executive: handles dispatch to the various modules. -//! pub type Executive = executive::Executive; +//! pub type Executive = executive::Executive; //! ``` //! //! ### Custom `OnRuntimeUpgrade` logic @@ -90,7 +90,7 @@ //! # type Context = frame_system::ChainContext; //! # pub type Block = generic::Block; //! # pub type Balances = u64; -//! # pub type AllModules = u64; +//! # pub type AllPallets = u64; //! # pub enum Runtime {}; //! # use sp_runtime::transaction_validity::{ //! # TransactionValidity, UnknownTransaction, TransactionSource, @@ -111,7 +111,7 @@ //! } //! } //! -//! pub type Executive = executive::Executive; +//! pub type Executive = executive::Executive; //! ``` #![cfg_attr(not(feature = "std"), no_std)] @@ -144,12 +144,12 @@ pub type OriginOf = as Dispatchable>::Origin; /// - `Block`: The block type of the runtime /// - `Context`: The context that is used when checking an extrinsic. /// - `UnsignedValidator`: The unsigned transaction validator of the runtime. -/// - `AllModules`: Tuple that contains all modules. Will be used to call e.g. `on_initialize`. 
+/// - `AllPallets`: Tuple that contains all modules. Will be used to call e.g. `on_initialize`. /// - `OnRuntimeUpgrade`: Custom logic that should be called after a runtime upgrade. Modules are -/// already called by `AllModules`. It will be called before all modules will +/// already called by `AllPallets`. It will be called before all modules will /// be called. -pub struct Executive( - PhantomData<(System, Block, Context, UnsignedValidator, AllModules, OnRuntimeUpgrade)> +pub struct Executive( + PhantomData<(System, Block, Context, UnsignedValidator, AllPallets, OnRuntimeUpgrade)> ); impl< @@ -157,7 +157,7 @@ impl< Block: traits::Block, Context: Default, UnsignedValidator, - AllModules: + AllPallets: OnRuntimeUpgrade + OnInitialize + OnIdle + @@ -165,7 +165,7 @@ impl< OffchainWorker, COnRuntimeUpgrade: OnRuntimeUpgrade, > ExecuteBlock for - Executive + Executive where Block::Extrinsic: Checkable + Codec, CheckedOf: @@ -176,7 +176,7 @@ where UnsignedValidator: ValidateUnsigned>, { fn execute_block(block: Block) { - Executive::::execute_block(block); + Executive::::execute_block(block); } } @@ -185,13 +185,13 @@ impl< Block: traits::Block

, Context: Default, UnsignedValidator, - AllModules: OnRuntimeUpgrade + AllPallets: OnRuntimeUpgrade + OnInitialize + OnIdle + OnFinalize + OffchainWorker, COnRuntimeUpgrade: OnRuntimeUpgrade, - > Executive + > Executive where Block::Extrinsic: Checkable + Codec, CheckedOf: Applyable + GetDispatchInfo, @@ -204,10 +204,10 @@ where pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { let mut weight = 0; weight = weight.saturating_add( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as OnRuntimeUpgrade>::on_runtime_upgrade(), ); weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); - weight = weight.saturating_add(::on_runtime_upgrade()); + weight = weight.saturating_add(::on_runtime_upgrade()); weight } @@ -218,7 +218,7 @@ where #[cfg(feature = "try-runtime")] pub fn try_runtime_upgrade() -> Result { < - (frame_system::Module::, COnRuntimeUpgrade, AllModules) + (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) as OnRuntimeUpgrade >::pre_upgrade()?; @@ -226,7 +226,7 @@ where let weight = Self::execute_on_runtime_upgrade(); < - (frame_system::Module::, COnRuntimeUpgrade, AllModules) + (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) as OnRuntimeUpgrade >::post_upgrade()?; @@ -265,24 +265,24 @@ where if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } - >::initialize( + >::initialize( block_number, parent_hash, digest, frame_system::InitKind::Full, ); weight = weight.saturating_add( - as OnInitialize>::on_initialize(*block_number) + as OnInitialize>::on_initialize(*block_number) ); weight = weight.saturating_add( - >::on_initialize(*block_number) + >::on_initialize(*block_number) ); weight = weight.saturating_add( >::get().base_block ); - >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); + >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); - frame_system::Module::::note_finished_initialize(); + 
frame_system::Pallet::::note_finished_initialize(); } /// Returns if the runtime was upgraded since the last time this function was called. @@ -308,7 +308,7 @@ where let n = header.number().clone(); assert!( n > System::BlockNumber::zero() - && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), + && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), "Parent hash should be valid.", ); } @@ -350,7 +350,7 @@ where }); // post-extrinsics book-keeping - >::note_finished_extrinsics(); + >::note_finished_extrinsics(); Self::idle_and_finalize_hook(block_number); } @@ -360,36 +360,36 @@ where pub fn finalize_block() -> System::Header { sp_io::init_tracing(); sp_tracing::enter_span!( sp_tracing::Level::TRACE, "finalize_block" ); - >::note_finished_extrinsics(); - let block_number = >::block_number(); + >::note_finished_extrinsics(); + let block_number = >::block_number(); Self::idle_and_finalize_hook(block_number); - >::finalize() + >::finalize() } fn idle_and_finalize_hook(block_number: NumberFor) { - let weight = >::block_weight(); + let weight = >::block_weight(); let max_weight = >::get().max_block; let mut remaining_weight = max_weight.saturating_sub(weight.total()); if remaining_weight > 0 { let mut used_weight = - as OnIdle>::on_idle( + as OnIdle>::on_idle( block_number, remaining_weight ); remaining_weight = remaining_weight.saturating_sub(used_weight); - used_weight = >::on_idle( + used_weight = >::on_idle( block_number, remaining_weight ) .saturating_add(used_weight); - >::register_extra_weight_unchecked(used_weight, DispatchClass::Mandatory); + >::register_extra_weight_unchecked(used_weight, DispatchClass::Mandatory); } - as OnFinalize>::on_finalize(block_number); - >::on_finalize(block_number); + as OnFinalize>::on_finalize(block_number); + >::on_finalize(block_number); } /// Apply extrinsic outside of the block execution function. 
@@ -419,7 +419,7 @@ where // We don't need to make sure to `note_extrinsic` only after we know it's going to be // executed to prevent it from leaking in storage since at this point, it will either // execute or panic (and revert storage changes). - >::note_extrinsic(to_note); + >::note_extrinsic(to_note); // AUDIT: Under no circumstances may this function panic from here onwards. @@ -427,7 +427,7 @@ where let dispatch_info = xt.get_dispatch_info(); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; - >::note_applied_extrinsic(&r, dispatch_info); + >::note_applied_extrinsic(&r, dispatch_info); Ok(r.map(|_| ()).map_err(|e| e.error)) } @@ -435,7 +435,7 @@ where fn final_checks(header: &System::Header) { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks"); // remove temporaries - let new_header = >::finalize(); + let new_header = >::finalize(); // check digest assert_eq!( @@ -499,7 +499,7 @@ where // OffchainWorker RuntimeApi should skip initialization. let digests = header.digest().clone(); - >::initialize( + >::initialize( header.number(), header.parent_hash(), &digests, @@ -511,7 +511,7 @@ where // as well. 
frame_system::BlockHash::::insert(header.number(), header.hash()); - >::offchain_worker(*header.number()) + >::offchain_worker(*header.number()) } } @@ -628,9 +628,9 @@ mod tests { NodeBlock = TestBlock, UncheckedExtrinsic = TestUncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Custom: custom::{Module, Call, ValidateUnsigned}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Custom: custom::{Pallet, Call, ValidateUnsigned}, } ); @@ -741,7 +741,7 @@ mod tests { Block, ChainContext, Runtime, - AllModules, + AllPallets, CustomOnRuntimeUpgrade >; @@ -780,8 +780,8 @@ mod tests { )); let r = Executive::apply_extrinsic(xt); assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - fee); - assert_eq!(>::total_balance(&2), 69); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); }); } @@ -857,7 +857,7 @@ mod tests { Digest::default(), )); assert!(Executive::apply_extrinsic(xt).is_err()); - assert_eq!(>::extrinsic_index(), Some(0)); + assert_eq!(>::extrinsic_index(), Some(0)); }); } @@ -883,7 +883,7 @@ mod tests { Digest::default(), )); // Base block execution weight + `on_initialize` weight from the custom module. 
- assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::block_weight().total(), base_block_weight); for nonce in 0..=num_to_exhaust_block { let xt = TestXt::new( @@ -893,11 +893,11 @@ mod tests { if nonce != num_to_exhaust_block { assert!(res.is_ok()); assert_eq!( - >::block_weight().total(), + >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight (encoded_len + 5) * (nonce + 1) + base_block_weight, ); - assert_eq!(>::extrinsic_index(), Some(nonce as u32 + 1)); + assert_eq!(>::extrinsic_index(), Some(nonce as u32 + 1)); } else { assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); } @@ -924,8 +924,8 @@ mod tests { Digest::default(), )); - assert_eq!(>::block_weight().total(), base_block_weight); - assert_eq!(>::all_extrinsics_len(), 0); + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); @@ -935,14 +935,14 @@ mod tests { let extrinsic_weight = len as Weight + ::BlockWeights ::get().get(DispatchClass::Normal).base_extrinsic; assert_eq!( - >::block_weight().total(), + >::block_weight().total(), base_block_weight + 3 * extrinsic_weight, ); - assert_eq!(>::all_extrinsics_len(), 3 * len); + assert_eq!(>::all_extrinsics_len(), 3 * len); - let _ = >::finalize(); + let _ = >::finalize(); // All extrinsics length cleaned on `System::finalize` - assert_eq!(>::all_extrinsics_len(), 0); + assert_eq!(>::all_extrinsics_len(), 0); // New Block Executive::initialize_block(&Header::new( @@ -954,7 +954,7 @@ mod tests { )); // Block weight cleaned up on `System::initialize` - assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::block_weight().total(), base_block_weight); }); } @@ -989,7 +989,7 @@ mod tests { let execute_with_lock = |lock: WithdrawReasons| { let mut t = new_test_ext(1); 
t.execute_with(|| { - as LockableCurrency>::set_lock( + as LockableCurrency>::set_lock( id, &1, 110, @@ -1017,13 +1017,13 @@ mod tests { if lock == WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT) { assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); // tx fee has been deducted. - assert_eq!(>::total_balance(&1), 111 - fee); + assert_eq!(>::total_balance(&1), 111 - fee); } else { assert_eq!( Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()), ); - assert_eq!(>::total_balance(&1), 111); + assert_eq!(>::total_balance(&1), 111); } }); }; @@ -1041,7 +1041,7 @@ mod tests { // NOTE: might need updates over time if new weights are introduced. // For now it only accounts for the base block execution weight and // the `on_initialize` weight defined in the custom test module. - assert_eq!(>::block_weight().total(), 175 + 175 + 10); + assert_eq!(>::block_weight().total(), 175 + 175 + 10); }) } @@ -1159,16 +1159,16 @@ mod tests { )); // All weights that show up in the `initialize_block_impl` - let frame_system_upgrade_weight = frame_system::Module::::on_runtime_upgrade(); + let frame_system_upgrade_weight = frame_system::Pallet::::on_runtime_upgrade(); let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); - let runtime_upgrade_weight = ::on_runtime_upgrade(); - let frame_system_on_initialize_weight = frame_system::Module::::on_initialize(block_number); - let on_initialize_weight = >::on_initialize(block_number); + let runtime_upgrade_weight = ::on_runtime_upgrade(); + let frame_system_on_initialize_weight = frame_system::Pallet::::on_initialize(block_number); + let on_initialize_weight = >::on_initialize(block_number); let base_block_weight = ::BlockWeights::get().base_block; // Weights are recorded correctly assert_eq!( - frame_system::Module::::block_weight().total(), + frame_system::Pallet::::block_weight().total(), frame_system_upgrade_weight + custom_runtime_upgrade_weight + runtime_upgrade_weight + diff 
--git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index ab35ce76742bc..23596a8b6e147 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -78,7 +78,7 @@ pub mod weights; pub mod pallet { use sp_std::prelude::*; use sp_arithmetic::{Perquintill, PerThing}; - use sp_runtime::traits::{Zero, Saturating, SaturatedConversion}; + use sp_runtime::traits::{Zero, Saturating}; use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -96,7 +96,13 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// Currency type that this works on. - type Currency: ReservableCurrency; + type Currency: ReservableCurrency; + + /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to + /// `From`. + type CurrencyBalance: + sp_runtime::traits::AtLeast32BitUnsigned + codec::FullCodec + Copy + + MaybeSerializeDeserialize + sp_std::fmt::Debug + Default + From; /// Origin required for setting the target proportion to be under gilt. type AdminOrigin: EnsureOrigin; @@ -440,7 +446,7 @@ pub mod pallet { let gilt = Active::::get(index).ok_or(Error::::Unknown)?; // If found, check the owner is `who`. ensure!(gilt.who == who, Error::::NotOwner); - let now = frame_system::Module::::block_number(); + let now = frame_system::Pallet::::block_number(); ensure!(now >= gilt.expiry, Error::::NotExpired); // Remove it Active::::remove(index); @@ -448,11 +454,10 @@ pub mod pallet { // Multiply the proportion it is by the total issued. 
let total_issuance = T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); ActiveTotal::::mutate(|totals| { - let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) - .saturated_into(); + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); let effective_issuance = totals.proportion.left_from_one() .saturating_reciprocal_mul(nongilt_issuance); - let gilt_value: BalanceOf = (gilt.proportion * effective_issuance).saturated_into(); + let gilt_value = gilt.proportion * effective_issuance; totals.frozen = totals.frozen.saturating_sub(gilt.amount); totals.proportion = totals.proportion.saturating_sub(gilt.proportion); @@ -488,7 +493,40 @@ pub mod pallet { } } + /// Issuance information returned by `issuance()`. + pub struct IssuanceInfo { + /// The balance held in reserve over all active gilts. + pub reserved: Balance, + /// The issuance not held in reserve for active gilts. Together with `reserved` this sums to + /// `Currency::total_issuance`. + pub non_gilt: Balance, + /// The balance that `reserved` is effectively worth, at present. This is not issued funds + /// and could be less than `reserved` (though in most cases should be greater). + pub effective: Balance, + } + impl Pallet { + /// Get the target amount of Gilts that we're aiming for. + pub fn target() -> Perquintill { + ActiveTotal::::get().target + } + + /// Returns information on the issuance of gilts. + pub fn issuance() -> IssuanceInfo> { + let totals = ActiveTotal::::get(); + + let total_issuance = T::Currency::total_issuance(); + let non_gilt = total_issuance.saturating_sub(totals.frozen); + let effective = totals.proportion.left_from_one() + .saturating_reciprocal_mul(non_gilt); + + IssuanceInfo { + reserved: totals.frozen, + non_gilt, + effective, + } + } + /// Attempt to enlarge our gilt-set from bids in order to satisfy our desired target amount /// of funds frozen into gilts. 
pub fn pursue_target(max_bids: u32) -> Weight { @@ -497,11 +535,10 @@ pub mod pallet { let missing = totals.target.saturating_sub(totals.proportion); let total_issuance = T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); - let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) - .saturated_into(); + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); let effective_issuance = totals.proportion.left_from_one() .saturating_reciprocal_mul(nongilt_issuance); - let intake: BalanceOf = (missing * effective_issuance).saturated_into(); + let intake = missing * effective_issuance; let (bids_taken, queues_hit) = Self::enlarge(intake, max_bids); let first_from_each_queue = T::WeightInfo::pursue_target_per_queue(queues_hit); @@ -525,7 +562,7 @@ pub mod pallet { let mut remaining = amount; let mut bids_taken = 0; let mut queues_hit = 0; - let now = frame_system::Module::::block_number(); + let now = frame_system::Pallet::::block_number(); ActiveTotal::::mutate(|totals| { QueueTotals::::mutate(|qs| { @@ -550,13 +587,12 @@ pub mod pallet { qs[queue_index].1 = qs[queue_index].1.saturating_sub(bid.amount); // Now to activate the bid... 
- let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) - .saturated_into(); + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); let effective_issuance = totals.proportion.left_from_one() .saturating_reciprocal_mul(nongilt_issuance); - let n: u128 = amount.saturated_into(); + let n = amount; let d = effective_issuance; - let proportion = Perquintill::from_rational_approximation(n, d); + let proportion = Perquintill::from_rational(n, d); let who = bid.who; let index = totals.index; totals.frozen += bid.amount; diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index ca4ccaff73c58..1abb92ed3dfac 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -36,9 +36,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Config, Storage, Event}, - Gilt: pallet_gilt::{Module, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, + Gilt: pallet_gilt::{Pallet, Call, Config, Storage, Event}, } ); @@ -103,6 +103,7 @@ ord_parameter_types! 
{ impl pallet_gilt::Config for Test { type Event = Event; type Currency = Balances; + type CurrencyBalance = ::Balance; type AdminOrigin = frame_system::EnsureSignedBy; type Deficit = (); type Surplus = (); diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 2bf7306f58e15..547e3966d52a4 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -39,7 +39,7 @@ pallet-offences = { version = "3.0.0", path = "../offences" } pallet-staking = { version = "3.0.0", path = "../staking" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } pallet-timestamp = { version = "3.0.0", path = "../timestamp" } -sp-election-providers = { version = "3.0.0", path = "../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", path = "../election-provider-support" } [features] default = ["std"] diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 37496fdeb8592..8ab86b2fed065 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -358,7 +358,7 @@ impl Offence fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index b68624df7b5dc..eb3dc4f110acb 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -390,7 +390,7 @@ impl Module { /// Cannot be done when already paused. 
pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Live = >::get() { - let scheduled_at = >::block_number(); + let scheduled_at = >::block_number(); >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at, @@ -405,7 +405,7 @@ impl Module { /// Schedule a resume of GRANDPA after pausing. pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Paused = >::get() { - let scheduled_at = >::block_number(); + let scheduled_at = >::block_number(); >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at, @@ -437,7 +437,7 @@ impl Module { forced: Option, ) -> DispatchResult { if !>::exists() { - let scheduled_at = >::block_number(); + let scheduled_at = >::block_number(); if let Some(_) = forced { if Self::next_forced().map_or(false, |next| next > scheduled_at) { @@ -465,7 +465,7 @@ impl Module { /// Deposit one of this module's logs. fn deposit_log(log: ConsensusLog) { let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } // Perform module initialization, abstracted so that it can be called either through genesis diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 0a24a2344547e..3f450e18bc783 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -40,7 +40,7 @@ use sp_runtime::{ }; use sp_staking::SessionIndex; use pallet_session::historical as pallet_session_historical; -use sp_election_providers::onchain; +use frame_election_provider_support::onchain; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -51,14 +51,14 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Balances: pallet_balances::{Module, Call, 
Storage, Config, Event}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Offences: pallet_offences::{Module, Call, Storage, Event}, - Historical: pallet_session_historical::{Module}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, } ); @@ -193,11 +193,13 @@ parameter_types! { impl onchain::Config for Test { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; + type BlockWeights = (); type Accuracy = Perbill; type DataProvider = Staking; } impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type Event = Event; @@ -209,16 +211,10 @@ impl pallet_staking::Config for Test { type SlashDeferDuration = SlashDeferDuration; type SlashCancelOrigin = frame_system::EnsureRoot; type SessionInterface = Self; - type UnixTime = pallet_timestamp::Module; - type RewardCurve = RewardCurve; + type UnixTime = pallet_timestamp::Pallet; + type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = 
(); type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 645b3817d6ec9..372abc72a97dd 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -25,12 +25,12 @@ use frame_system::{EventRecord, RawOrigin}; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Module as Identity; +use crate::Pallet as Identity; const SEED: u32 = 0; fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); + let events = frame_system::Pallet::::events(); let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index fed32afa2e62f..880d202795922 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -17,8 +17,8 @@ //! # Identity Module //! -//! - [`identity::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -598,7 +598,8 @@ decl_module! { T::Currency::reserve(&sender, id.deposit - old_deposit)?; } if old_deposit > id.deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); + let err_amount = T::Currency::unreserve(&sender, old_deposit - id.deposit); + debug_assert!(err_amount.is_zero()); } let judgements = id.judgements.len(); @@ -655,7 +656,8 @@ decl_module! { if old_deposit < new_deposit { T::Currency::reserve(&sender, new_deposit - old_deposit)?; } else if old_deposit > new_deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit); + let err_amount = T::Currency::unreserve(&sender, old_deposit - new_deposit); + debug_assert!(err_amount.is_zero()); } // do nothing if they're equal. @@ -713,7 +715,8 @@ decl_module! 
{ >::remove(sub); } - let _ = T::Currency::unreserve(&sender, deposit.clone()); + let err_amount = T::Currency::unreserve(&sender, deposit.clone()); + debug_assert!(err_amount.is_zero()); Self::deposit_event(RawEvent::IdentityCleared(sender, deposit)); @@ -819,7 +822,8 @@ decl_module! { Err(Error::::JudgementGiven)? }; - let _ = T::Currency::unreserve(&sender, fee); + let err_amount = T::Currency::unreserve(&sender, fee); + debug_assert!(err_amount.is_zero()); let judgements = id.judgements.len(); let extra_fields = id.info.additional.len(); >::insert(&sender, id); @@ -1095,7 +1099,8 @@ decl_module! { sub_ids.retain(|x| x != &sub); let deposit = T::SubAccountDeposit::get().min(*subs_deposit); *subs_deposit -= deposit; - let _ = T::Currency::unreserve(&sender, deposit); + let err_amount = T::Currency::unreserve(&sender, deposit); + debug_assert!(err_amount.is_zero()); Self::deposit_event(RawEvent::SubIdentityRemoved(sub, sender, deposit)); }); } diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 230079a21ea0d..a996c989a9185 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -37,9 +37,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Identity: pallet_identity::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Identity: pallet_identity::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index e00b5aa9d1395..d8f3fdc854b16 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -30,9 +30,9 @@ //! as the [NetworkState](../../client/offchain/struct.NetworkState.html). //! It is submitted as an Unsigned Transaction via off-chain workers. //! -//! 
- [`im_online::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Interface //! @@ -100,11 +100,7 @@ use frame_support::{ }, Parameter, }; -use frame_system::ensure_none; -use frame_system::offchain::{ - SendTransactionTypes, - SubmitTransaction, -}; +use frame_system::{ensure_none, offchain::{SendTransactionTypes, SubmitTransaction}}; pub use weights::WeightInfo; pub mod sr25519 { @@ -672,7 +668,7 @@ impl OneSessionHandler for Module { // Tell the offchain worker to start making the next session's heartbeats. // Since we consider producing blocks as being online, // the heartbeat is deferred a bit to prevent spamming. - let block_number = >::block_number(); + let block_number = >::block_number(); let half_session = T::NextSessionRotation::average_session_length() / 2u32.into(); >::put(block_number + half_session); @@ -813,7 +809,7 @@ impl Offence for UnresponsivenessOffence { // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). 
if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational_approximation(3 * threshold, validator_set_count); + let x = Perbill::from_rational(3 * threshold, validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index f8346aa536248..35028dd89df4e 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -44,10 +44,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - ImOnline: imonline::{Module, Call, Storage, Config, Event}, - Historical: pallet_session_historical::{Module}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ImOnline: imonline::{Pallet, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Pallet}, } ); diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 06c73b1a9bc27..01db4b50f5085 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -33,9 +33,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, } ); diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 8248caa06708e..94b7dd459889a 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -277,7 +277,7 @@ decl_module! 
{ ensure!(lottery.is_none(), Error::::InProgress); let index = LotteryIndex::get(); let new_index = index.checked_add(1).ok_or(Error::::Overflow)?; - let start = frame_system::Module::::block_number(); + let start = frame_system::Pallet::::block_number(); // Use new_index to more easily track everything with the current state. *lottery = Some(LotteryConfig { price, @@ -324,7 +324,8 @@ decl_module! { let winning_number = Self::choose_winner(ticket_count); let winner = Tickets::::get(winning_number).unwrap_or(lottery_account); // Not much we can do if this fails... - let _ = T::Currency::transfer(&Self::account_id(), &winner, lottery_balance, KeepAlive); + let res = T::Currency::transfer(&Self::account_id(), &winner, lottery_balance, KeepAlive); + debug_assert!(res.is_ok()); Self::deposit_event(RawEvent::Winner(winner, lottery_balance)); @@ -392,7 +393,7 @@ impl Module { fn do_buy_ticket(caller: &T::AccountId, call: &::Call) -> DispatchResult { // Check the call is valid lottery let config = Lottery::::get().ok_or(Error::::NotConfigured)?; - let block_number = frame_system::Module::::block_number(); + let block_number = frame_system::Pallet::::block_number(); ensure!(block_number < config.start.saturating_add(config.length), Error::::AlreadyEnded); ensure!(T::ValidateCall::validate_call(call), Error::::InvalidCall); let call_index = Self::call_to_index(call)?; diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index 44691427c8e59..a776896921a7f 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -42,9 +42,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Lottery: pallet_lottery::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + 
Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index f080938095445..e26af3ce9b71a 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -293,8 +293,8 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Membership: pallet_membership::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Membership: pallet_membership::{Pallet, Call, Storage, Config, Event}, } ); diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs index 0887535dca0ef..73d4d3ecc1fc3 100644 --- a/frame/merkle-mountain-range/primitives/src/lib.rs +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -51,10 +51,10 @@ impl LeafDataProvider for () { /// so that any point in time in the future we can receive a proof about some past /// blocks without using excessive on-chain storage. /// -/// Hence we implement the [LeafDataProvider] for [frame_system::Module]. Since the +/// Hence we implement the [LeafDataProvider] for [frame_system::Pallet]. Since the /// current block hash is not available (since the block is not finished yet), /// we use the `parent_hash` here along with parent block number. 
-impl LeafDataProvider for frame_system::Module { +impl LeafDataProvider for frame_system::Pallet { type LeafData = ( ::BlockNumber, ::Hash diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index 0adb0294d5080..072724a58afe5 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -40,8 +40,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - MMR: pallet_mmr::{Module, Call, Storage}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + MMR: pallet_mmr::{Pallet, Call, Storage}, } ); @@ -78,7 +78,7 @@ impl Config for Test { type Hashing = Keccak256; type Hash = H256; - type LeafData = Compact, LeafData)>; + type LeafData = Compact, LeafData)>; type OnNewRoot = (); type WeightInfo = (); } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index ea522dc51cd03..dfaf60ef2eab6 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -39,11 +39,11 @@ fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { } fn new_block() -> u64 { - let number = frame_system::Module::::block_number() + 1; + let number = frame_system::Pallet::::block_number() + 1; let hash = H256::repeat_byte(number as u8); LEAF_DATA.with(|r| r.borrow_mut().a = number); - frame_system::Module::::initialize( + frame_system::Pallet::::initialize( &number, &hash, &Default::default(), diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index aa72d2d1ad3ca..8c8e1c0dbc436 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -18,8 +18,8 @@ //! # Multisig Module //! A module for doing multisig dispatch. //! -//! - [`multisig::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! 
@@ -436,7 +436,8 @@ decl_module! { ensure!(m.when == timepoint, Error::::WrongTimepoint); ensure!(m.depositor == who, Error::::NotOwner); - let _ = T::Currency::unreserve(&m.depositor, m.deposit); + let err_amount = T::Currency::unreserve(&m.depositor, m.deposit); + debug_assert!(err_amount.is_zero()); >::remove(&id, &call_hash); Self::clear_call(&call_hash); @@ -638,8 +639,8 @@ impl Module { /// The current `Timepoint`. pub fn timepoint() -> Timepoint { Timepoint { - height: >::block_number(), - index: >::extrinsic_index().unwrap_or_default(), + height: >::block_number(), + index: >::extrinsic_index().unwrap_or_default(), } } diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index a3f47a26e6422..a3a3edc34f1a9 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -37,9 +37,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Multisig: pallet_multisig::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, } ); @@ -124,7 +124,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } fn last_event() -> Event { - system::Module::::events().pop().map(|e| e.event).expect("Event expected") + system::Pallet::::events().pop().map(|e| e.event).expect("Event expected") } fn expect_event>(e: E) { diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 681a45626fbca..67e62a09da64a 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -17,8 +17,8 @@ //! # Nicks Module //! -//! - [`nicks::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -179,7 +179,8 @@ decl_module! 
{ let deposit = >::take(&sender).ok_or(Error::::Unnamed)?.1; - let _ = T::Currency::unreserve(&sender, deposit.clone()); + let err_amount = T::Currency::unreserve(&sender, deposit.clone()); + debug_assert!(err_amount.is_zero()); Self::deposit_event(RawEvent::NameCleared(sender, deposit)); } @@ -257,9 +258,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Nicks: pallet_nicks::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Nicks: pallet_nicks::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 245db9176f740..786eb84d1e523 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-node-authorization" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 090be28492630..5f233549c73ca 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -37,117 +37,137 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub mod weights; + use sp_core::OpaquePeerId as PeerId; use sp_std::{ collections::btree_set::BTreeSet, iter::FromIterator, prelude::*, }; -use codec::Decode; -use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, - ensure, weights::{DispatchClass, Weight}, traits::{Get, EnsureOrigin}, -}; -use frame_system::ensure_signed; - -pub trait WeightInfo { - fn add_well_known_node() -> Weight; - fn remove_well_known_node() -> Weight; - fn swap_well_known_node() -> Weight; - fn reset_well_known_nodes() -> Weight; - fn claim_node() -> Weight; - fn remove_claim() -> Weight; - fn transfer_node() -> Weight; - fn add_connections() -> Weight; - fn remove_connections() -> Weight; -} +pub use pallet::*; +pub use weights::WeightInfo; -impl WeightInfo for () { - fn add_well_known_node() -> Weight { 50_000_000 } - fn remove_well_known_node() -> Weight { 50_000_000 } - fn swap_well_known_node() -> Weight { 50_000_000 } - fn reset_well_known_nodes() -> Weight { 50_000_000 } - fn claim_node() -> Weight { 50_000_000 } - fn remove_claim() -> Weight { 50_000_000 } - fn transfer_node() -> Weight { 50_000_000 } - fn add_connections() -> Weight { 50_000_000 } - fn remove_connections() -> Weight { 50_000_000 } -} - -pub trait Config: frame_system::Config { - /// The event type of this module. - type Event: From> + Into<::Event>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::DispatchResult, + pallet_prelude::*, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// The module configuration trait + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. 
+ type Event: From> + IsType<::Event>; + + /// The maximum number of well known nodes that are allowed to set + #[pallet::constant] + type MaxWellKnownNodes: Get; - /// The maximum number of well known nodes that are allowed to set - type MaxWellKnownNodes: Get; + /// The maximum length in bytes of PeerId + #[pallet::constant] + type MaxPeerIdLength: Get; - /// The maximum length in bytes of PeerId - type MaxPeerIdLength: Get; + /// The origin which can add a well known node. + type AddOrigin: EnsureOrigin; - /// The origin which can add a well known node. - type AddOrigin: EnsureOrigin; + /// The origin which can remove a well known node. + type RemoveOrigin: EnsureOrigin; - /// The origin which can remove a well known node. - type RemoveOrigin: EnsureOrigin; + /// The origin which can swap the well known nodes. + type SwapOrigin: EnsureOrigin; - /// The origin which can swap the well known nodes. - type SwapOrigin: EnsureOrigin; + /// The origin which can reset the well known nodes. + type ResetOrigin: EnsureOrigin; - /// The origin which can reset the well known nodes. - type ResetOrigin: EnsureOrigin; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// The set of well known nodes. This is stored sorted (just by value). + #[pallet::storage] + #[pallet::getter(fn well_known_nodes)] + pub type WellKnownNodes = StorageValue<_, BTreeSet, ValueQuery>; + + /// A map that maintains the ownership of each node. + #[pallet::storage] + #[pallet::getter(fn owners)] + pub type Owners = StorageMap< + _, + Blake2_128Concat, + PeerId, + T::AccountId, + >; + + /// The additional adapative connections of each node. 
+ #[pallet::storage] + #[pallet::getter(fn additional_connection)] + pub type AdditionalConnections = StorageMap< + _, + Blake2_128Concat, + PeerId, + BTreeSet, + ValueQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub nodes: Vec<(PeerId, T::AccountId)>, + } -decl_storage! { - trait Store for Module as NodeAuthorization { - /// The set of well known nodes. This is stored sorted (just by value). - pub WellKnownNodes get(fn well_known_nodes): BTreeSet; - /// A map that maintains the ownership of each node. - pub Owners get(fn owners): - map hasher(blake2_128_concat) PeerId => T::AccountId; - /// The additional adapative connections of each node. - pub AdditionalConnections get(fn additional_connection): - map hasher(blake2_128_concat) PeerId => BTreeSet; + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { nodes: Vec::new() } + } } - add_extra_genesis { - config(nodes): Vec<(PeerId, T::AccountId)>; - build(|config: &GenesisConfig| { - >::initialize_nodes(&config.nodes) - }) + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_nodes(&self.nodes); + } } -} -decl_event! { - pub enum Event where - ::AccountId, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { /// The given well known node was added. - NodeAdded(PeerId, AccountId), + NodeAdded(PeerId, T::AccountId), /// The given well known node was removed. NodeRemoved(PeerId), /// The given well known node was swapped; first item was removed, /// the latter was added. NodeSwapped(PeerId, PeerId), /// The given well known nodes were reset. - NodesReset(Vec<(PeerId, AccountId)>), + NodesReset(Vec<(PeerId, T::AccountId)>), /// The given node was claimed by a user. - NodeClaimed(PeerId, AccountId), + NodeClaimed(PeerId, T::AccountId), /// The given claim was removed by its owner. 
- ClaimRemoved(PeerId, AccountId), + ClaimRemoved(PeerId, T::AccountId), /// The node was transferred to another account. - NodeTransferred(PeerId, AccountId), + NodeTransferred(PeerId, T::AccountId), /// The allowed connections were added to a node. ConnectionsAdded(PeerId, Vec), /// The allowed connections were removed from a node. ConnectionsRemoved(PeerId, Vec), } -} -decl_error! { - /// Error for the node authorization module. - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// The PeerId is too long. PeerIdTooLong, /// Too many well known nodes. @@ -165,41 +185,65 @@ decl_error! { /// No permisson to perform specific operation. PermissionDenied, } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// The maximum number of authorized well known nodes - const MaxWellKnownNodes: u32 = T::MaxWellKnownNodes::get(); - - /// The maximum length in bytes of PeerId - const MaxPeerIdLength: u32 = T::MaxPeerIdLength::get(); - - type Error = Error; - - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { + /// Set reserved node every block. It may not be enabled depends on the offchain + /// worker settings when starting the node. + fn offchain_worker(now: T::BlockNumber) { + let network_state = sp_io::offchain::network_state(); + match network_state { + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to get network state of node at {:?}", + now, + ), + Ok(state) => { + let encoded_peer = state.peer_id.0; + match Decode::decode(&mut &encoded_peer[..]) { + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to decode PeerId at {:?}", + now, + ), + Ok(node) => sp_io::offchain::set_authorized_nodes( + Self::get_authorized_nodes(&PeerId(node)), + true + ) + } + } + } + } + } + #[pallet::call] + impl Pallet { /// Add a node to the set of well known nodes. 
If the node is already claimed, the owner /// will be updated and keep the existing additional connection unchanged. /// /// May only be called from `T::AddOrigin`. /// /// - `node`: identifier of the node. - #[weight = (T::WeightInfo::add_well_known_node(), DispatchClass::Operational)] - pub fn add_well_known_node(origin, node: PeerId, owner: T::AccountId) { + #[pallet::weight((T::WeightInfo::add_well_known_node(), DispatchClass::Operational))] + pub fn add_well_known_node( + origin: OriginFor, + node: PeerId, + owner: T::AccountId + ) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); ensure!(!nodes.contains(&node), Error::::AlreadyJoined); nodes.insert(node.clone()); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); >::insert(&node, &owner); - Self::deposit_event(RawEvent::NodeAdded(node, owner)); + Self::deposit_event(Event::NodeAdded(node, owner)); + Ok(()) } /// Remove a node from the set of well known nodes. The ownership and additional @@ -208,21 +252,22 @@ decl_module! { /// May only be called from `T::RemoveOrigin`. /// /// - `node`: identifier of the node. 
- #[weight = (T::WeightInfo::remove_well_known_node(), DispatchClass::Operational)] - pub fn remove_well_known_node(origin, node: PeerId) { + #[pallet::weight((T::WeightInfo::remove_well_known_node(), DispatchClass::Operational))] + pub fn remove_well_known_node(origin: OriginFor, node: PeerId) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.contains(&node), Error::::NotExist); nodes.remove(&node); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); >::remove(&node); - AdditionalConnections::remove(&node); + AdditionalConnections::::remove(&node); - Self::deposit_event(RawEvent::NodeRemoved(node)); + Self::deposit_event(Event::NodeRemoved(node)); + Ok(()) } /// Swap a well known node to another. Both the ownership and additional connections @@ -232,26 +277,34 @@ decl_module! { /// /// - `remove`: the node which will be moved out from the list. /// - `add`: the node which will be put in the list. 
- #[weight = (T::WeightInfo::swap_well_known_node(), DispatchClass::Operational)] - pub fn swap_well_known_node(origin, remove: PeerId, add: PeerId) { + #[pallet::weight((T::WeightInfo::swap_well_known_node(), DispatchClass::Operational))] + pub fn swap_well_known_node( + origin: OriginFor, + remove: PeerId, + add: PeerId + ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; - ensure!(remove.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + ensure!( + remove.0.len() < T::MaxPeerIdLength::get() as usize, + Error::::PeerIdTooLong + ); ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); if remove == add { return Ok(()) } - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.contains(&remove), Error::::NotExist); ensure!(!nodes.contains(&add), Error::::AlreadyJoined); nodes.remove(&remove); nodes.insert(add.clone()); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); Owners::::swap(&remove, &add); - AdditionalConnections::swap(&remove, &add); + AdditionalConnections::::swap(&remove, &add); - Self::deposit_event(RawEvent::NodeSwapped(remove, add)); + Self::deposit_event(Event::NodeSwapped(remove, add)); + Ok(()) } /// Reset all the well known nodes. This will not remove the ownership and additional @@ -261,29 +314,34 @@ decl_module! { /// May only be called from `T::ResetOrigin`. /// /// - `nodes`: the new nodes for the allow list. 
- #[weight = (T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational)] - pub fn reset_well_known_nodes(origin, nodes: Vec<(PeerId, T::AccountId)>) { + #[pallet::weight((T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational))] + pub fn reset_well_known_nodes( + origin: OriginFor, + nodes: Vec<(PeerId, T::AccountId)> + ) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); Self::initialize_nodes(&nodes); - Self::deposit_event(RawEvent::NodesReset(nodes)); + Self::deposit_event(Event::NodesReset(nodes)); + Ok(()) } /// A given node can be claimed by anyone. The owner should be the first to know its /// PeerId, so claim it right away! /// /// - `node`: identifier of the node. - #[weight = T::WeightInfo::claim_node()] - pub fn claim_node(origin, node: PeerId) { + #[pallet::weight(T::WeightInfo::claim_node())] + pub fn claim_node(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); Owners::::insert(&node, &sender); - Self::deposit_event(RawEvent::NodeClaimed(node, sender)); + Self::deposit_event(Event::NodeClaimed(node, sender)); + Ok(()) } /// A claim can be removed by its owner and get back the reservation. The additional @@ -291,55 +349,61 @@ decl_module! { /// needs to reach consensus among the network participants. /// /// - `node`: identifier of the node. 
- #[weight = T::WeightInfo::remove_claim()] - pub fn remove_claim(origin, node: PeerId) { + #[pallet::weight(T::WeightInfo::remove_claim())] + pub fn remove_claim(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); - ensure!(!WellKnownNodes::get().contains(&node), Error::::PermissionDenied); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); + ensure!(!WellKnownNodes::::get().contains(&node), Error::::PermissionDenied); Owners::::remove(&node); - AdditionalConnections::remove(&node); + AdditionalConnections::::remove(&node); - Self::deposit_event(RawEvent::ClaimRemoved(node, sender)); + Self::deposit_event(Event::ClaimRemoved(node, sender)); + Ok(()) } /// A node can be transferred to a new owner. /// /// - `node`: identifier of the node. /// - `owner`: new owner of the node. - #[weight = T::WeightInfo::transfer_node()] - pub fn transfer_node(origin, node: PeerId, owner: T::AccountId) { + #[pallet::weight(T::WeightInfo::transfer_node())] + pub fn transfer_node( + origin: OriginFor, + node: PeerId, + owner: T::AccountId + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let pre_owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(pre_owner == sender, Error::::NotOwner); Owners::::insert(&node, &owner); - Self::deposit_event(RawEvent::NodeTransferred(node, owner)); + Self::deposit_event(Event::NodeTransferred(node, owner)); + Ok(()) } /// Add additional connections to a given node. /// /// - `node`: identifier of the node. 
/// - `connections`: additonal nodes from which the connections are allowed. - #[weight = T::WeightInfo::add_connections()] + #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( - origin, + origin: OriginFor, node: PeerId, connections: Vec - ) { + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); - let mut nodes = AdditionalConnections::get(&node); + let mut nodes = AdditionalConnections::::get(&node); for add_node in connections.iter() { if *add_node == node { @@ -348,73 +412,48 @@ decl_module! { nodes.insert(add_node.clone()); } - AdditionalConnections::insert(&node, nodes); + AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(RawEvent::ConnectionsAdded(node, connections)); + Self::deposit_event(Event::ConnectionsAdded(node, connections)); + Ok(()) } /// Remove additional connections of a given node. /// /// - `node`: identifier of the node. /// - `connections`: additonal nodes from which the connections are not allowed anymore. 
- #[weight = T::WeightInfo::remove_connections()] + #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( - origin, + origin: OriginFor, node: PeerId, connections: Vec - ) { + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); - let mut nodes = AdditionalConnections::get(&node); + let mut nodes = AdditionalConnections::::get(&node); for remove_node in connections.iter() { nodes.remove(remove_node); } - AdditionalConnections::insert(&node, nodes); + AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(RawEvent::ConnectionsRemoved(node, connections)); - } - - /// Set reserved node every block. It may not be enabled depends on the offchain - /// worker settings when starting the node. 
- fn offchain_worker(now: T::BlockNumber) { - let network_state = sp_io::offchain::network_state(); - match network_state { - Err(_) => log::error!( - target: "runtime::node-authorization", - "Error: failed to get network state of node at {:?}", - now, - ), - Ok(state) => { - let encoded_peer = state.peer_id.0; - match Decode::decode(&mut &encoded_peer[..]) { - Err(_) => log::error!( - target: "runtime::node-authorization", - "Error: failed to decode PeerId at {:?}", - now, - ), - Ok(node) => sp_io::offchain::set_authorized_nodes( - Self::get_authorized_nodes(&PeerId(node)), - true - ) - } - } - } + Self::deposit_event(Event::ConnectionsRemoved(node, connections)); + Ok(()) } } } -impl Module { +impl Pallet { fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { let peer_ids = nodes.iter() .map(|item| item.0.clone()) .collect::>(); - WellKnownNodes::put(&peer_ids); + WellKnownNodes::::put(&peer_ids); for (node, who) in nodes.iter() { Owners::::insert(node, who); @@ -422,9 +461,9 @@ impl Module { } fn get_authorized_nodes(node: &PeerId) -> Vec { - let mut nodes = AdditionalConnections::get(node); + let mut nodes = AdditionalConnections::::get(node); - let mut well_known_nodes = WellKnownNodes::get(); + let mut well_known_nodes = WellKnownNodes::::get(); if well_known_nodes.contains(node) { well_known_nodes.remove(node); nodes.extend(well_known_nodes); @@ -433,434 +472,3 @@ impl Module { Vec::from_iter(nodes) } } - -#[cfg(test)] -mod tests { - use super::*; - use crate as pallet_node_authorization; - - use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; - use frame_system::EnsureSignedBy; - use sp_core::H256; - use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - 
UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Module, Call, Config, Storage, Event}, - NodeAuthorization: pallet_node_authorization::{ - Module, Call, Storage, Config, Event, - }, - } - ); - - parameter_types! { - pub const BlockHashCount: u64 = 250; - } - impl frame_system::Config for Test { - type BaseCallFilter = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - } - - ord_parameter_types! { - pub const One: u64 = 1; - pub const Two: u64 = 2; - pub const Three: u64 = 3; - pub const Four: u64 = 4; - } - parameter_types! 
{ - pub const MaxWellKnownNodes: u32 = 4; - pub const MaxPeerIdLength: u32 = 2; - } - impl Config for Test { - type Event = Event; - type MaxWellKnownNodes = MaxWellKnownNodes; - type MaxPeerIdLength = MaxPeerIdLength; - type AddOrigin = EnsureSignedBy; - type RemoveOrigin = EnsureSignedBy; - type SwapOrigin = EnsureSignedBy; - type ResetOrigin = EnsureSignedBy; - type WeightInfo = (); - } - - fn test_node(id: u8) -> PeerId { - PeerId(vec![id]) - } - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_node_authorization::GenesisConfig:: { - nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn add_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), - BadOrigin - ); - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), - Error::::AlreadyJoined - ); - - assert_ok!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) - ); - assert_eq!(Owners::::get(test_node(10)), 10); - assert_eq!(Owners::::get(test_node(20)), 20); - assert_eq!(Owners::::get(test_node(30)), 30); - assert_eq!(Owners::::get(test_node(15)), 15); - - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), - Error::::TooManyNodes - ); - }); - } - - #[test] - fn remove_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), - BadOrigin - ); - assert_noop!( - 
NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), - Error::::NotExist - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(40)]) - ); - assert!(AdditionalConnections::contains_key(test_node(20))); - - assert_ok!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20)) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(30)]) - ); - assert!(!Owners::::contains_key(test_node(20))); - assert!(!AdditionalConnections::contains_key(test_node(20))); - }); - } - - #[test] - fn swap_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(4), test_node(20), test_node(5) - ), - BadOrigin - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) - ), - Error::::PeerIdTooLong - ); - - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(20) - ) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) - ); - - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(15), test_node(5) - ), - Error::::NotExist - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(30) - ), - Error::::AlreadyJoined - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(15)]) - ); - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(5) - ) - ); - assert_eq!( - 
WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) - ); - assert!(!Owners::::contains_key(test_node(20))); - assert_eq!(Owners::::get(test_node(5)), 20); - assert!(!AdditionalConnections::contains_key(test_node(20))); - assert_eq!( - AdditionalConnections::get(test_node(5)), - BTreeSet::from_iter(vec![test_node(15)]) - ); - }); - } - - #[test] - fn reset_well_known_nodes_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(3), - vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] - ), - BadOrigin - ); - assert_noop!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), - vec![ - (test_node(15), 15), - (test_node(5), 5), - (test_node(20), 20), - (test_node(25), 25), - ] - ), - Error::::TooManyNodes - ); - - assert_ok!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), - vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] - ) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) - ); - assert_eq!(Owners::::get(test_node(5)), 5); - assert_eq!(Owners::::get(test_node(15)), 15); - assert_eq!(Owners::::get(test_node(20)), 20); - }); - } - - #[test] - fn claim_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), - Error::::AlreadyClaimed - ); - - assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); - assert_eq!(Owners::::get(test_node(15)), 15); - }); - } - - #[test] - fn remove_claim_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), 
test_node(15)), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), - Error::::NotOwner - ); - - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), - Error::::PermissionDenied - ); - - Owners::::insert(test_node(15), 15); - AdditionalConnections::insert( - test_node(15), - BTreeSet::from_iter(vec![test_node(20)]) - ); - assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); - assert!(!Owners::::contains_key(test_node(15))); - assert!(!AdditionalConnections::contains_key(test_node(15))); - }); - } - - #[test] - fn transfer_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), - Error::::NotOwner - ); - - assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); - assert_eq!(Owners::::get(test_node(20)), 15); - }); - } - - #[test] - fn add_connections_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] - ), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] - ), - Error::::NotOwner - ); - - assert_ok!( - NodeAuthorization::add_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5), test_node(25), test_node(20)] - ) - ); - assert_eq!( - AdditionalConnections::get(test_node(20)), - BTreeSet::from_iter(vec![test_node(5), 
test_node(15), test_node(25)]) - ); - }); - } - - #[test] - fn remove_connections_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] - ), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] - ), - Error::::NotOwner - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - assert_ok!( - NodeAuthorization::remove_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5)] - ) - ); - assert_eq!( - AdditionalConnections::get(test_node(20)), - BTreeSet::from_iter(vec![test_node(25)]) - ); - }); - } - - #[test] - fn get_authorized_nodes_works() { - new_test_ext().execute_with(|| { - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - - let mut authorized_nodes = Module::::get_authorized_nodes(&test_node(20)); - authorized_nodes.sort(); - assert_eq!( - authorized_nodes, - vec![test_node(5), test_node(10), test_node(15), test_node(25), test_node(30)] - ); - }); - } -} diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs new file mode 100644 index 0000000000000..5118f07c7694e --- /dev/null +++ b/frame/node-authorization/src/mock.rs @@ -0,0 +1,106 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for node-authorization pallet. + +use super::*; +use crate as pallet_node_authorization; + +use frame_support::{ + parameter_types, ord_parameter_types, + traits::GenesisBuild, +}; +use frame_system::EnsureSignedBy; +use sp_core::H256; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + NodeAuthorization: pallet_node_authorization::{ + Pallet, Call, Storage, Config, Event, + }, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} + +ord_parameter_types! 
{ + pub const One: u64 = 1; + pub const Two: u64 = 2; + pub const Three: u64 = 3; + pub const Four: u64 = 4; +} +parameter_types! { + pub const MaxWellKnownNodes: u32 = 4; + pub const MaxPeerIdLength: u32 = 2; +} +impl Config for Test { + type Event = Event; + type MaxWellKnownNodes = MaxWellKnownNodes; + type MaxPeerIdLength = MaxPeerIdLength; + type AddOrigin = EnsureSignedBy; + type RemoveOrigin = EnsureSignedBy; + type SwapOrigin = EnsureSignedBy; + type ResetOrigin = EnsureSignedBy; + type WeightInfo = (); +} + +pub fn test_node(id: u8) -> PeerId { + PeerId(vec![id]) +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_node_authorization::GenesisConfig:: { + nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], + }.assimilate_storage(&mut t).unwrap(); + t.into() +} diff --git a/frame/node-authorization/src/tests.rs b/frame/node-authorization/src/tests.rs new file mode 100644 index 0000000000000..15a286fbc2390 --- /dev/null +++ b/frame/node-authorization/src/tests.rs @@ -0,0 +1,366 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for node-authorization pallet. 
+ +use super::*; +use crate::mock::*; +use frame_support::{assert_ok, assert_noop}; +use sp_runtime::traits::BadOrigin; + +#[test] +fn add_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), + BadOrigin + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), + Error::::AlreadyJoined + ); + + assert_ok!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15) + ); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) + ); + assert_eq!(Owners::::get(test_node(10)), Some(10)); + assert_eq!(Owners::::get(test_node(20)), Some(20)); + assert_eq!(Owners::::get(test_node(30)), Some(30)); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), + Error::::TooManyNodes + ); + }); +} + +#[test] +fn remove_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), + BadOrigin + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), + Error::::NotExist + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(40)]) + ); + assert!(AdditionalConnections::::contains_key(test_node(20))); + + assert_ok!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20)) + ); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(30)]) + ); + 
assert!(!Owners::::contains_key(test_node(20))); + assert!(!AdditionalConnections::::contains_key(test_node(20))); + }); +} + +#[test] +fn swap_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(4), test_node(20), test_node(5) + ), + BadOrigin + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) + ), + Error::::PeerIdTooLong + ); + + assert_ok!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(20) + ) + ); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) + ); + + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(15), test_node(5) + ), + Error::::NotExist + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(30) + ), + Error::::AlreadyJoined + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(15)]) + ); + assert_ok!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(5) + ) + ); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) + ); + assert!(!Owners::::contains_key(test_node(20))); + assert_eq!(Owners::::get(test_node(5)), Some(20)); + assert!(!AdditionalConnections::::contains_key(test_node(20))); + assert_eq!( + AdditionalConnections::::get(test_node(5)), + BTreeSet::from_iter(vec![test_node(15)]) + ); + }); +} + +#[test] +fn reset_well_known_nodes_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(3), + vec![(test_node(15), 15), (test_node(5), 5), 
(test_node(20), 20)] + ), + BadOrigin + ); + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![ + (test_node(15), 15), + (test_node(5), 5), + (test_node(20), 20), + (test_node(25), 25), + ] + ), + Error::::TooManyNodes + ); + + assert_ok!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + ) + ); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) + ); + assert_eq!(Owners::::get(test_node(5)), Some(5)); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + assert_eq!(Owners::::get(test_node(20)), Some(20)); + }); +} + +#[test] +fn claim_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), + Error::::AlreadyClaimed + ); + + assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + }); +} + +#[test] +fn remove_claim_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), + Error::::NotOwner + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), + Error::::PermissionDenied + ); + + Owners::::insert(test_node(15), 15); + AdditionalConnections::::insert( + test_node(15), + BTreeSet::from_iter(vec![test_node(20)]) + ); + assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); + assert!(!Owners::::contains_key(test_node(15))); + 
assert!(!AdditionalConnections::::contains_key(test_node(15))); + }); +} + +#[test] +fn transfer_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), + Error::::NotOwner + ); + + assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); + assert_eq!(Owners::::get(test_node(20)), Some(15)); + }); +} + +#[test] +fn add_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), test_node(15), vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), test_node(20), vec![test_node(5)] + ), + Error::::NotOwner + ); + + assert_ok!( + NodeAuthorization::add_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5), test_node(25), test_node(20)] + ) + ); + assert_eq!( + AdditionalConnections::::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + }); +} + +#[test] +fn remove_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), test_node(15), vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), test_node(20), vec![test_node(5)] + ), + 
Error::::NotOwner + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + assert_ok!( + NodeAuthorization::remove_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5)] + ) + ); + assert_eq!( + AdditionalConnections::::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(25)]) + ); + }); +} + +#[test] +fn get_authorized_nodes_works() { + new_test_ext().execute_with(|| { + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + + let mut authorized_nodes = Pallet::::get_authorized_nodes(&test_node(20)); + authorized_nodes.sort(); + assert_eq!( + authorized_nodes, + vec![test_node(5), test_node(10), test_node(15), test_node(25), test_node(30)] + ); + }); +} diff --git a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs new file mode 100644 index 0000000000000..3d01e40d67ac3 --- /dev/null +++ b/frame/node-authorization/src/weights.rs @@ -0,0 +1,48 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Autogenerated weights for pallet_node_authorization + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +pub trait WeightInfo { + fn add_well_known_node() -> Weight; + fn remove_well_known_node() -> Weight; + fn swap_well_known_node() -> Weight; + fn reset_well_known_nodes() -> Weight; + fn claim_node() -> Weight; + fn remove_claim() -> Weight; + fn transfer_node() -> Weight; + fn add_connections() -> Weight; + fn remove_connections() -> Weight; +} + +impl WeightInfo for () { + fn add_well_known_node() -> Weight { 50_000_000 } + fn remove_well_known_node() -> Weight { 50_000_000 } + fn swap_well_known_node() -> Weight { 50_000_000 } + fn reset_well_known_nodes() -> Weight { 50_000_000 } + fn claim_node() -> Weight { 50_000_000 } + fn remove_claim() -> Weight { 50_000_000 } + fn transfer_node() -> Weight { 50_000_000 } + fn add_connections() -> Weight { 50_000_000 } + fn remove_connections() -> Weight { 50_000_000 } +} diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 6be2787734a4f..6c249ebcc61d8 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -27,7 +27,7 @@ pallet-staking = { version = "3.0.0", default-features = false, features = ["run sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "3.0.0", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } -sp-election-providers = { version = "3.0.0", default-features = false, path = "../../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", default-features = false, path = "../../election-provider-support" } [dev-dependencies] pallet-staking-reward-curve = { version = "3.0.0", path = 
"../../staking/reward-curve" } @@ -51,7 +51,7 @@ std = [ "pallet-staking/std", "sp-runtime/std", "sp-staking/std", - "sp-election-providers/std", + "frame-election-provider-support/std", "sp-std/std", "codec/std", ] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 0ceebaecd91ae..08517a4ac8df0 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -24,11 +24,14 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_system::{RawOrigin, Module as System, Config as SystemConfig}; +use frame_system::{RawOrigin, Pallet as System, Config as SystemConfig}; use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use frame_support::traits::{Currency, OnInitialize, ValidatorSet, ValidatorSetWithIdentification}; -use sp_runtime::{Perbill, traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}}; +use sp_runtime::{ + Perbill, + traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}, +}; use sp_staking::offence::{ReportOffence, Offence, OffenceDetails}; use pallet_balances::Config as BalancesConfig; @@ -39,8 +42,8 @@ use pallet_offences::{Config as OffencesConfig, Module as Offences}; use pallet_session::historical::{Config as HistoricalConfig, IdentificationTuple}; use pallet_session::{Config as SessionConfig, SessionManager}; use pallet_staking::{ - Module as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, - Exposure, IndividualExposure, ElectionStatus, MAX_NOMINATIONS, Event as StakingEvent + Module as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, + IndividualExposure, Event as StakingEvent, }; const SEED: u32 = 0; @@ -50,7 +53,7 @@ const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; const MAX_DEFERRED_OFFENCES: u32 = 100; -pub struct Module(Offences); +pub struct Pallet(Offences); pub trait Config: SessionConfig @@ -236,7 +239,7 @@ benchmarks! { let r in 1 .. 
MAX_REPORTERS; // we skip 1 offender, because in such case there is no slashing let o in 2 .. MAX_OFFENDERS; - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // Make r reporters let mut reporters = vec![]; @@ -310,7 +313,7 @@ benchmarks! { } report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // for grandpa equivocation reports the number of reporters // and offenders is always 1 @@ -346,7 +349,7 @@ benchmarks! { } report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // for babe equivocation reports the number of reporters // and offenders is always 1 @@ -386,8 +389,6 @@ benchmarks! { let o = 10; let n = 100; - Staking::::put_election_status(ElectionStatus::Closed); - let mut deferred_offences = vec![]; let offenders = make_offenders::(o, n)?.0; let offence_details = offenders.into_iter() @@ -421,7 +422,7 @@ benchmarks! 
{ } impl_benchmark_test_suite!( - Module, + Pallet, crate::mock::new_test_ext(), crate::mock::Test, ); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 54d649381eea6..223d6d4d477a6 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -29,7 +29,7 @@ use sp_runtime::{ traits::IdentityLookup, testing::{Header, UintAuthorityId}, }; -use sp_election_providers::onchain; +use frame_election_provider_support::onchain; use pallet_session::historical as pallet_session_historical; type AccountId = u64; @@ -152,13 +152,15 @@ pub type Extrinsic = sp_runtime::testing::TestXt; impl onchain::Config for Test { type AccountId = AccountId; type BlockNumber = BlockNumber; + type BlockWeights = (); type Accuracy = Perbill; type DataProvider = Staking; } impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); type Event = Event; @@ -169,15 +171,9 @@ impl pallet_staking::Config for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = (); - type Call = Call; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = (); - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } @@ -219,13 +215,13 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, 
Event}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - Offences: pallet_offences::{Module, Call, Storage, Event}, - Historical: pallet_session_historical::{Module}, + System: system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, + Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, } ); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index c47a9cf943c18..ab45bb0837b56 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -91,8 +91,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Offences: offences::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Offences: offences::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 130c980011871..4027fcbafa0d6 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -23,12 +23,12 @@ use super::*; use frame_system::{RawOrigin, EventRecord}; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Module as Proxy; +use crate::Pallet as Proxy; const SEED: u32 = 0; fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); + let events = frame_system::Pallet::::events(); let system_event: ::Event = 
generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; @@ -90,7 +90,7 @@ benchmarks! { let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted(Ok(())).into()) } proxy_announced { @@ -111,7 +111,7 @@ benchmarks! { add_announcements::(a, Some(delegate.clone()), None)?; }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted(Ok(())).into()) } remove_announcement { @@ -169,7 +169,7 @@ benchmarks! { let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { - assert_last_event::(RawEvent::Announced(real, caller, call_hash).into()); + assert_last_event::(Event::Announced(real, caller, call_hash).into()); } add_proxy { @@ -219,8 +219,8 @@ benchmarks! { 0 ) verify { - let anon_account = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(RawEvent::AnonymousCreated( + let anon_account = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(Event::AnonymousCreated( anon_account, caller, T::ProxyType::default(), @@ -233,15 +233,15 @@ benchmarks! 
{ let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Module::::anonymous( + Pallet::::anonymous( RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), T::BlockNumber::zero(), 0 )?; - let height = system::Module::::block_number(); - let ext_index = system::Module::::extrinsic_index().unwrap_or(0); - let anon = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + let height = system::Pallet::::block_number(); + let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let anon = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(anon.clone()))?; ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 1e5aaadcc62d3..5e63e0cd8d3d9 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -15,25 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Proxy Module -//! A module allowing accounts to give permission to other accounts to dispatch types of calls from +//! # Proxy Pallet +//! A pallet allowing accounts to give permission to other accounts to dispatch types of calls from //! their signed origin. //! -//! The accounts to which permission is delegated may be requied to announce the action that they +//! The accounts to which permission is delegated may be required to announce the action that they //! wish to execute some duration prior to execution happens. In this case, the target account may //! reject the announcement and in doing so, veto the execution. //! -//! - [`proxy::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! -//! ## Overview -//! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! [`Call`]: ./enum.Call.html -//! [`Config`]: ./trait.Config.html +//! - [`Config`] +//! 
- [`Call`] // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -45,73 +36,25 @@ pub mod weights; use sp_std::prelude::*; use codec::{Encode, Decode}; use sp_io::hashing::blake2_256; -use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero, Hash, Member, Saturating}}; +use sp_runtime::{ + DispatchResult, + traits::{Dispatchable, Zero, Hash, Saturating} +}; use frame_support::{ - decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug, traits::{ - Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, IsSubType, - }, weights::{Weight, GetDispatchInfo}, dispatch::PostDispatchInfo, storage::IterableStorageMap, + RuntimeDebug, ensure, + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, + traits::{Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, IsSubType}, + weights::{Weight, GetDispatchInfo} }; -use frame_system::{self as system, ensure_signed}; +use frame_system::{self as system}; use frame_support::dispatch::DispatchError; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -/// Configuration trait. -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub use pallet::*; - /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From> + IsSubType> - + IsType<::Call>; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. - /// The instance filter determines whether a given call may be proxied under this type. - /// - /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. - type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> - + Default; - - /// The base amount of currency needed to reserve for creating a proxy. 
- /// - /// This is held for an additional storage item whose value size is - /// `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes. - type ProxyDepositBase: Get>; - - /// The amount of currency needed per proxy added. - /// - /// This is held for adding 32 bytes plus an instance of `ProxyType` more into a pre-existing - /// storage value. - type ProxyDepositFactor: Get>; - - /// The maximum amount of proxies allowed for a single account. - type MaxProxies: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// The maximum amount of time-delayed announcements that are allowed to be pending. - type MaxPending: Get; - - /// The type of hash used for hashing the call. - type CallHasher: Hash; - - /// The base amount of currency needed to reserve for creating an announcement. - /// - /// This is held when a new storage item holding a `Balance` is created (typically 16 bytes). - type AnnouncementDepositBase: Get>; +type CallHashOf = <::CallHasher as Hash>::Output; - /// The amount of currency needed per announcement made. - /// - /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) - /// into a pre-existing storage value. - type AnnouncementDepositFactor: Get>; -} +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The parameters under which a particular account has a proxy relationship with some other /// account. @@ -137,84 +80,85 @@ pub struct Announcement { height: BlockNumber, } -type CallHashOf = <::CallHasher as Hash>::Output; - -decl_storage! { - trait Store for Module as Proxy { - /// The set of account proxies. Maps the account which has delegated to the accounts - /// which are being delegated to, together with the amount held on deposit. 
- pub Proxies get(fn proxies): map hasher(twox_64_concat) T::AccountId - => (Vec>, BalanceOf); +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::{*, DispatchResult}; - /// The announcements made by the proxy (key). - pub Announcements get(fn announcements): map hasher(twox_64_concat) T::AccountId - => (Vec, T::BlockNumber>>, BalanceOf); - } -} + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); -decl_error! { - pub enum Error for Module { - /// There are too many proxies registered or too many announcements pending. - TooMany, - /// Proxy registration not found. - NotFound, - /// Sender is not a proxy of the account to be proxied. - NotProxy, - /// A call which is incompatible with the proxy type's filter was attempted. - Unproxyable, - /// Account is already a proxy. - Duplicate, - /// Call may not be made by proxy because it may escalate its privileges. - NoPermission, - /// Announcement, if made at all, was made too recently. - Unannounced, - /// Cannot add self as proxy. - NoSelfProxy, - } -} + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; -decl_event! { - /// Events type. - pub enum Event where - AccountId = ::AccountId, - ProxyType = ::ProxyType, - Hash = CallHashOf, - { - /// A proxy was executed correctly, with the given \[result\]. - ProxyExecuted(DispatchResult), - /// Anonymous account has been created by new proxy with given - /// disambiguation index and proxy type. \[anonymous, who, proxy_type, disambiguation_index\] - AnonymousCreated(AccountId, AccountId, ProxyType, u16), - /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] - Announced(AccountId, AccountId, Hash), - } -} + /// The overarching call type. 
+ type Call: Parameter + Dispatchable + + GetDispatchInfo + From> + IsSubType> + + IsType<::Call>; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + /// The currency mechanism. + type Currency: ReservableCurrency; - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; + /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. + /// The instance filter determines whether a given call may be proxied under this type. + /// + /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. + type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + + Default; /// The base amount of currency needed to reserve for creating a proxy. - const ProxyDepositBase: BalanceOf = T::ProxyDepositBase::get(); + /// + /// This is held for an additional storage item whose value size is + /// `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes. + #[pallet::constant] + type ProxyDepositBase: Get>; /// The amount of currency needed per proxy added. - const ProxyDepositFactor: BalanceOf = T::ProxyDepositFactor::get(); + /// + /// This is held for adding 32 bytes plus an instance of `ProxyType` more into a pre-existing + /// storage value. Thus, when configuring `ProxyDepositFactor` one should take into account + /// `32 + proxy_type.encode().len()` bytes of data. + #[pallet::constant] + type ProxyDepositFactor: Get>; /// The maximum amount of proxies allowed for a single account. - const MaxProxies: u16 = T::MaxProxies::get(); + #[pallet::constant] + type MaxProxies: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; - /// `MaxPending` metadata shadow. - const MaxPending: u32 = T::MaxPending::get(); + /// The maximum amount of time-delayed announcements that are allowed to be pending. 
+ #[pallet::constant] + type MaxPending: Get; + + /// The type of hash used for hashing the call. + type CallHasher: Hash; + + /// The base amount of currency needed to reserve for creating an announcement. + /// + /// This is held when a new storage item holding a `Balance` is created (typically 16 bytes). + #[pallet::constant] + type AnnouncementDepositBase: Get>; - /// `AnnouncementDepositBase` metadata shadow. - const AnnouncementDepositBase: BalanceOf = T::AnnouncementDepositBase::get(); + /// The amount of currency needed per announcement made. + /// + /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) + /// into a pre-existing storage value. + #[pallet::constant] + type AnnouncementDepositFactor: Get>; + } - /// `AnnouncementDepositFactor` metadata shadow. - const AnnouncementDepositFactor: BalanceOf = T::AnnouncementDepositFactor::get(); + #[pallet::hooks] + impl Hooks> for Pallet {} + #[pallet::call] + impl Pallet { /// Dispatch the given `call` from an account that the sender is authorised for through /// `add_proxy`. /// @@ -230,24 +174,27 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = { + #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get().into()) .saturating_add(di.weight) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) - }] - fn proxy(origin, + })] + pub(super) fn proxy( + origin: OriginFor, real: T::AccountId, force_proxy_type: Option, call: Box<::Call>, - ) { + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; ensure!(def.delay.is_zero(), Error::::Unannounced); Self::do_proxy(def, real, *call); + + Ok(().into()) } /// Register a proxy account for the sender that is able to make calls on its behalf. @@ -263,12 +210,13 @@ decl_module! 
{ /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::add_proxy(T::MaxProxies::get().into())] - fn add_proxy(origin, + #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get().into()))] + pub(super) fn add_proxy( + origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; Self::add_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -284,12 +232,13 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::remove_proxy(T::MaxProxies::get().into())] - fn remove_proxy(origin, + #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get().into()))] + pub(super) fn remove_proxy( + origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; Self::remove_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -304,11 +253,13 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::remove_proxies(T::MaxProxies::get().into())] - fn remove_proxies(origin) { + #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get().into()))] + pub(super) fn remove_proxies(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let (_, old_deposit) = Proxies::::take(&who); T::Currency::unreserve(&who, old_deposit); + + Ok(().into()) } /// Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and @@ -334,8 +285,13 @@ decl_module! { /// Weight is a function of the number of proxies the user has (P). 
/// # /// TODO: Might be over counting 1 read - #[weight = T::WeightInfo::anonymous(T::MaxProxies::get().into())] - fn anonymous(origin, proxy_type: T::ProxyType, delay: T::BlockNumber, index: u16) { + #[pallet::weight(T::WeightInfo::anonymous(T::MaxProxies::get().into()))] + pub(super) fn anonymous( + origin: OriginFor, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + index: u16 + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); @@ -348,7 +304,9 @@ decl_module! { delay, }; Proxies::::insert(&anonymous, (vec![proxy_def], deposit)); - Self::deposit_event(RawEvent::AnonymousCreated(anonymous, who, proxy_type, index)); + Self::deposit_event(Event::AnonymousCreated(anonymous, who, proxy_type, index)); + + Ok(().into()) } /// Removes a previously spawned anonymous proxy. @@ -371,14 +329,15 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::kill_anonymous(T::MaxProxies::get().into())] - fn kill_anonymous(origin, + #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get().into()))] + pub(super) fn kill_anonymous( + origin: OriginFor, spawner: T::AccountId, proxy_type: T::ProxyType, index: u16, - #[compact] height: T::BlockNumber, - #[compact] ext_index: u32, - ) { + #[pallet::compact] height: T::BlockNumber, + #[pallet::compact] ext_index: u32, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let when = (height, ext_index); @@ -387,6 +346,8 @@ decl_module! { let (_, deposit) = Proxies::::take(&who); T::Currency::unreserve(&spawner, deposit); + + Ok(().into()) } /// Publish the hash of a proxy-call that will be made in the future. @@ -410,8 +371,12 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. 
/// # - #[weight = T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into())] - fn announce(origin, real: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into()))] + pub(super) fn announce( + origin: OriginFor, + real: T::AccountId, + call_hash: CallHashOf + ) -> DispatchResultWithPostInfo{ let who = ensure_signed(origin)?; Proxies::::get(&real).0.into_iter() .find(|x| &x.delegate == &who) @@ -420,7 +385,7 @@ decl_module! { let announcement = Announcement { real: real.clone(), call_hash: call_hash.clone(), - height: system::Module::::block_number(), + height: system::Pallet::::block_number(), }; Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { @@ -435,7 +400,9 @@ decl_module! { ).map(|d| d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed")) .map(|d| *deposit = d) })?; - Self::deposit_event(RawEvent::Announced(real, who, call_hash)); + Self::deposit_event(Event::Announced(real, who, call_hash)); + + Ok(().into()) } /// Remove a given announcement. @@ -454,10 +421,18 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. /// # - #[weight = T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] - fn remove_announcement(origin, real: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight( + T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) + )] + pub(super) fn remove_announcement( + origin: OriginFor, + real: T::AccountId, + call_hash: CallHashOf + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; + + Ok(().into()) } /// Remove the given announcement of a delegate. @@ -476,13 +451,21 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. 
/// # - #[weight = T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] - fn reject_announcement(origin, delegate: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight( + T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) + )] + pub(super) fn reject_announcement( + origin: OriginFor, + delegate: T::AccountId, + call_hash: CallHashOf + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; Self::edit_announcements(&delegate, |ann| ann.real != who || ann.call_hash != call_hash)?; + + Ok(().into()) } - /// Dispatch the given `call` from an account that the sender is authorised for through + /// Dispatch the given `call` from an account that the sender is authorized for through /// `add_proxy`. /// /// Removes any corresponding announcement(s). @@ -499,35 +482,100 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. /// # - #[weight = { + #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get().into()) .saturating_add(di.weight) // AccountData for inner call origin accountdata. 
.saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) - }] - fn proxy_announced(origin, + })] + pub(super) fn proxy_announced( + origin: OriginFor, delegate: T::AccountId, real: T::AccountId, force_proxy_type: Option, call: Box<::Call>, - ) { + ) -> DispatchResultWithPostInfo { ensure_signed(origin)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = system::Module::::block_number(); + let now = system::Pallet::::block_number(); Self::edit_announcements(&delegate, |ann| ann.real != real || ann.call_hash != call_hash || now.saturating_sub(ann.height) < def.delay ).map_err(|_| Error::::Unannounced)?; Self::do_proxy(def, real, *call); + + Ok(().into()) } } + + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId", T::ProxyType = "ProxyType", CallHashOf = "Hash")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event + { + /// A proxy was executed correctly, with the given \[result\]. + ProxyExecuted(DispatchResult), + /// Anonymous account has been created by new proxy with given + /// disambiguation index and proxy type. \[anonymous, who, proxy_type, disambiguation_index\] + AnonymousCreated(T::AccountId, T::AccountId, T::ProxyType, u16), + /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] + Announced(T::AccountId, T::AccountId, CallHashOf), + } + + /// Old name generated by `decl_event`. + #[deprecated(note="use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// There are too many proxies registered or too many announcements pending. + TooMany, + /// Proxy registration not found. + NotFound, + /// Sender is not a proxy of the account to be proxied. + NotProxy, + /// A call which is incompatible with the proxy type's filter was attempted. + Unproxyable, + /// Account is already a proxy. 
+ Duplicate, + /// Call may not be made by proxy because it may escalate its privileges. + NoPermission, + /// Announcement, if made at all, was made too recently. + Unannounced, + /// Cannot add self as proxy. + NoSelfProxy, + } + + /// The set of account proxies. Maps the account which has delegated to the accounts + /// which are being delegated to, together with the amount held on deposit. + #[pallet::storage] + #[pallet::getter(fn proxies)] + pub type Proxies = StorageMap< + _, + Twox64Concat, + T::AccountId, + (Vec>, BalanceOf), + ValueQuery + >; + + /// The announcements made by the proxy (key). + #[pallet::storage] + #[pallet::getter(fn announcements)] + pub type Announcements = StorageMap< + _, + Twox64Concat, + T::AccountId, + (Vec, T::BlockNumber>>, BalanceOf), + ValueQuery + >; + } -impl Module { +impl Pallet { /// Calculate the address of an anonymous account. /// @@ -547,8 +595,8 @@ impl Module { maybe_when: Option<(T::BlockNumber, u32)>, ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| ( - system::Module::::block_number(), - system::Module::::extrinsic_index().unwrap_or_default() + system::Pallet::::block_number(), + system::Pallet::::extrinsic_index().unwrap_or_default() )); let entropy = (b"modlpy/proxy____", who, height, ext_index, proxy_type, index) .using_encoded(blake2_256); @@ -568,7 +616,7 @@ impl Module { delegatee: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { ensure!(delegator != &delegatee, Error::::NoSelfProxy); Proxies::::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| { ensure!(proxies.len() < T::MaxProxies::get() as usize, Error::::TooMany); @@ -582,7 +630,7 @@ impl Module { T::Currency::unreserve(delegator, *deposit - new_deposit); } *deposit = new_deposit; - Ok(()) + Ok(().into()) }) } @@ -599,7 +647,7 @@ impl Module { delegatee: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, - ) -> DispatchResult { + ) 
-> DispatchResultWithPostInfo { Proxies::::try_mutate_exists(delegator, |x| { let (mut proxies, old_deposit) = x.take().ok_or(Error::::NotFound)?; let proxy_def = ProxyDefinition { delegate: delegatee, proxy_type, delay }; @@ -614,7 +662,7 @@ impl Module { if !proxies.is_empty() { *x = Some((proxies, new_deposit)) } - Ok(()) + Ok(().into()) }) } @@ -701,7 +749,7 @@ impl Module { } }); let e = call.dispatch(origin); - Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index b31ef1dfdb2fe..797a5ee3d4694 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -38,10 +38,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Proxy: proxy::{Module, Call, Storage, Event}, - Utility: pallet_utility::{Module, Call, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Proxy: proxy::{Pallet, Call, Storage, Event}, + Utility: pallet_utility::{Pallet, Call, Event}, } ); @@ -150,6 +150,7 @@ use pallet_balances::Error as BalancesError; use pallet_balances::Event as BalancesEvent; use pallet_utility::Call as UtilityCall; use pallet_utility::Event as UtilityEvent; +use super::Event as ProxyEvent; use super::Call as ProxyCall; pub fn new_test_ext() -> sp_io::TestExternalities { @@ -163,7 +164,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } fn last_event() -> Event { - system::Module::::events().pop().expect("Event expected").event + system::Pallet::::events().pop().expect("Event expected").event } fn expect_event>(e: E) { @@ -171,7 +172,7 @@ fn expect_event>(e: E) { } fn last_events(n: usize) -> Vec { - 
system::Module::::events().into_iter().rev().take(n).rev().map(|e| e.event).collect() + system::Pallet::::events().into_iter().rev().take(n).rev().map(|e| e.event).collect() } fn expect_events(e: Vec) { @@ -270,7 +271,7 @@ fn delayed_requires_pre_announcement() { assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); - system::Module::::set_block_number(2); + system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); }); } @@ -288,7 +289,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { let e = Error::::Unannounced; assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); - system::Module::::set_block_number(2); + system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); assert_eq!(Announcements::::get(3), (vec![Announcement { real: 2, @@ -309,11 +310,11 @@ fn filtering_works() { let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); let derivative_id = Utility::derivative_account_id(1, 0); assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); @@ -321,42 +322,42 @@ fn filtering_works() { let call = Box::new(Call::Utility(UtilityCall::as_derivative(0, inner.clone()))); 
assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), - RawEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), ]); let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any, 0))); let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + 
expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), - RawEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), ]); let call = Box::new(Call::Proxy(ProxyCall::remove_proxies())); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![BalancesEvent::::Unreserved(1, 5).into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![BalancesEvent::::Unreserved(1, 5).into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); }); } @@ -411,18 +412,18 @@ fn proxying_works() { Error::::NotProxy ); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_eq!(Balances::free_balance(6), 1); let call = Box::new(Call::System(SystemCall::set_code(vec![]))); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); let call = Box::new(Call::Balances(BalancesCall::transfer_keep_alive(6, 1))); assert_ok!(Call::Proxy(super::Call::proxy(1, None, call.clone())).dispatch(Origin::signed(2))); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + expect_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); 
assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_eq!(Balances::free_balance(6), 2); }); } @@ -432,7 +433,7 @@ fn anonymous_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); - expect_event(RawEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0)); + expect_event(ProxyEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0)); // other calls to anonymous allowed as long as they're not exactly the same. assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); @@ -449,13 +450,13 @@ fn anonymous_works() { let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call)); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + expect_event(ProxyEvent::ProxyExecuted(Ok(()))); assert_eq!(Balances::free_balance(6), 1); let call = Box::new(Call::Proxy(ProxyCall::kill_anonymous(1, ProxyType::Any, 0, 1, 0))); assert_ok!(Proxy::proxy(Origin::signed(2), anon2, None, call.clone())); let de = DispatchError::from(Error::::NoPermission).stripped(); - expect_event(RawEvent::ProxyExecuted(Err(de))); + expect_event(ProxyEvent::ProxyExecuted(Err(de))); assert_noop!( Proxy::kill_anonymous(Origin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 57e95ccb141df..5ef76a33c21f4 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -25,7 +25,7 @@ //! //! ## Public Functions //! -//! See the [`Module`](./struct.Module.html) struct for details of publicly available functions. +//! 
See the [`Module`] struct for details of publicly available functions. //! //! ## Usage //! @@ -76,7 +76,7 @@ fn block_number_to_index(block_number: T::BlockNumber) -> usize { decl_module! { pub struct Module for enum Call where origin: T::Origin { fn on_initialize(block_number: T::BlockNumber) -> Weight { - let parent_hash = >::parent_hash(); + let parent_hash = >::parent_hash(); >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { values.push(parent_hash) @@ -111,7 +111,7 @@ impl Randomness for Module { /// and mean that all bits of the resulting value are entirely manipulatable by the author of /// the parent block, who can determine the value of `parent_hash`. fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { - let block_number = >::block_number(); + let block_number = >::block_number(); let index = block_number_to_index::(block_number); let hash_series = >::get(); @@ -157,8 +157,8 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - CollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + CollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, } ); diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 00cd6ff2a7f79..cb991e64945a1 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -17,8 +17,8 @@ //! # Recovery Pallet //! -//! - [`recovery::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -496,7 +496,7 @@ decl_module! { T::Currency::reserve(&who, recovery_deposit)?; // Create an active recovery status let recovery_status = ActiveRecovery { - created: >::block_number(), + created: >::block_number(), deposit: recovery_deposit, friends: vec![], }; @@ -578,7 +578,7 @@ decl_module! 
{ let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); // Make sure the delay period has passed - let current_block_number = >::block_number(); + let current_block_number = >::block_number(); let recoverable_block_number = active_recovery.created .checked_add(&recovery_config.delay_period) .ok_or(Error::::Overflow)?; @@ -588,7 +588,7 @@ decl_module! { recovery_config.threshold as usize <= active_recovery.friends.len(), Error::::Threshold ); - system::Module::::inc_consumers(&who).map_err(|_| Error::::BadState)?; + system::Pallet::::inc_consumers(&who).map_err(|_| Error::::BadState)?; // Create the recovery storage item Proxy::::insert(&who, &account); Self::deposit_event(RawEvent::AccountRecovered(account, who)); @@ -677,7 +677,7 @@ decl_module! { // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); - system::Module::::dec_consumers(&who); + system::Pallet::::dec_consumers(&who); } } } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index ee38b0e24cc60..301dd8dba8ddd 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -35,9 +35,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Recovery: recovery::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Recovery: recovery::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 37ccb900a824a..563a1ba89c86f 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -26,7 +26,7 @@ 
use frame_support::{ensure, traits::OnInitialize}; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use crate::Module as Scheduler; -use frame_system::Module as System; +use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 5cab10b0aff38..9848c9853d0bf 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -18,9 +18,9 @@ //! # Scheduler //! A module for scheduling dispatches. //! -//! - [`scheduler::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Overview //! @@ -465,7 +465,7 @@ impl Module { } fn resolve_time(when: DispatchTime) -> Result { - let now = frame_system::Module::::block_number(); + let now = frame_system::Pallet::::block_number(); let when = match when { DispatchTime::At(x) => x, @@ -793,9 +793,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Logger: logger::{Module, Call, Event}, - Scheduler: scheduler::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Logger: logger::{Pallet, Call, Event}, + Scheduler: scheduler::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index ce2279b150050..da26872a0071a 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -37,9 +37,9 @@ //! from the `Pool` and `Members`; the entity is immediately replaced //! by the next highest scoring candidate in the pool, if available. //! -//! - [`scored_pool::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Interface //! 
diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 3c4263b813e41..76f9dd848d6c0 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -37,9 +37,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - ScoredPool: pallet_scored_pool::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ScoredPool: pallet_scored_pool::{Pallet, Call, Storage, Config, Event}, } ); diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 8f33f30f6ed8e..e24ee91164973 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -24,8 +24,8 @@ use frame_support::{assert_ok, assert_noop, traits::OnInitialize}; use sp_runtime::traits::BadOrigin; type ScoredPool = Module; -type System = frame_system::Module; -type Balances = pallet_balances::Module; +type System = frame_system::Pallet; +type Balances = pallet_balances::Pallet; #[test] fn query_membership_works() { diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 47265ed5ef7a8..0c83347b1991f 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -31,14 +31,14 @@ pallet-staking-reward-curve = { version = "3.0.0", path = "../../staking/reward- sp-io ={ version = "3.0.0", path = "../../../primitives/io" } pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } pallet-balances = { version = "3.0.0", path = "../../balances" } -sp-election-providers = { version = "3.0.0", path = "../../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", path = "../../election-provider-support" } [features] default = ["std"] 
std = [ "sp-std/std", "sp-session/std", - "sp-election-providers/std", + "frame-election-provider-support/std", "sp-runtime/std", "frame-system/std", "frame-benchmarking/std", diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 8546800ee4fdc..fff3717607f8f 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -35,16 +35,16 @@ use frame_system::RawOrigin; use pallet_session::{historical::Module as Historical, Module as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, - MAX_NOMINATIONS, RewardDestination, + RewardDestination, }; use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; -pub struct Module(pallet_session::Module); +pub struct Pallet(pallet_session::Module); pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config {} -impl OnInitialize for Module { +impl OnInitialize for Pallet { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { pallet_session::Module::::on_initialize(n) } @@ -52,10 +52,10 @@ impl OnInitialize for Module { benchmarks! { set_keys { - let n = MAX_NOMINATIONS as u32; + let n = ::MAX_NOMINATIONS; let (v_stash, _) = create_validator_with_nominators::( n, - MAX_NOMINATIONS as u32, + ::MAX_NOMINATIONS, false, RewardDestination::Staked, )?; @@ -68,10 +68,10 @@ benchmarks! 
{ }: _(RawOrigin::Signed(v_controller), keys, proof) purge_keys { - let n = MAX_NOMINATIONS as u32; + let n = ::MAX_NOMINATIONS; let (v_stash, _) = create_validator_with_nominators::( n, - MAX_NOMINATIONS as u32, + ::MAX_NOMINATIONS, false, RewardDestination::Staked )?; @@ -157,7 +157,7 @@ fn check_membership_proof_setup( Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); } - Module::::on_initialize(T::BlockNumber::one()); + Pallet::::on_initialize(T::BlockNumber::one()); // skip sessions until the new validator set is enacted while Session::::validators().len() < n as usize { @@ -170,7 +170,7 @@ fn check_membership_proof_setup( } impl_benchmark_test_suite!( - Module, + Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false, diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 0eba5452b28d0..53afeb620c260 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use sp_runtime::traits::IdentityLookup; -use sp_election_providers::onchain; +use frame_election_provider_support::onchain; use frame_support::parameter_types; type AccountId = u64; @@ -37,10 +37,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } ); @@ -157,13 +157,15 @@ where impl onchain::Config for Test { type AccountId = AccountId; type BlockNumber = 
BlockNumber; + type BlockWeights = (); type Accuracy = sp_runtime::Perbill; type DataProvider = Staking; } impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); type Event = Event; @@ -174,15 +176,9 @@ impl pallet_staking::Config for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = (); - type Call = Call; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 9b4d2704cf456..8902ebe551f6c 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -358,7 +358,7 @@ pub(crate) mod tests { ); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) 
in &keys { - frame_system::Module::::inc_providers(k); + frame_system::Pallet::::inc_providers(k); } }); crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index f095be9e44e24..f675d878c1e28 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -28,7 +28,7 @@ use sp_runtime::{offchain::storage::StorageValueRef, KeyTypeId}; use sp_session::MembershipProof; -use super::super::{Module as SessionModule, SessionIndex}; +use super::super::{Pallet as SessionModule, SessionIndex}; use super::{IdentificationTuple, ProvingTrie, Config}; use super::shared; @@ -167,7 +167,7 @@ mod tests { ); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { - frame_system::Module::::inc_providers(k); + frame_system::Pallet::::inc_providers(k); } }); diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 3b933bf262a00..8fe63a79e1c59 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -21,7 +21,7 @@ use codec::Encode; use sp_runtime::traits::Convert; use super::super::Config as SessionConfig; -use super::super::{Module as SessionModule, SessionIndex}; +use super::super::{Pallet as SessionModule, SessionIndex}; use super::Config as HistoricalConfig; use super::shared; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 45f3ae9dfce47..e7b16808f7239 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -20,9 +20,9 @@ //! The Session module allows validators to manage their session keys, provides a function for //! changing the session length, and handles session rotation. //! -//! - [`session::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Overview //! 
@@ -177,12 +177,12 @@ impl< // (0% is never returned). let progress = if now >= offset { let current = (now - offset) % period.clone() + One::one(); - Some(Percent::from_rational_approximation( + Some(Percent::from_rational( current.clone(), period.clone(), )) } else { - Some(Percent::from_rational_approximation( + Some(Percent::from_rational( now + One::one(), offset, )) @@ -442,7 +442,11 @@ decl_storage! { for (account, val, keys) in config.keys.iter().cloned() { >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); - assert!(frame_system::Module::::inc_consumers(&account).is_ok()); + assert!( + frame_system::Pallet::::inc_consumers(&account).is_ok(), + "Account ({:?}) does not exist at genesis to set key. Account not endowed?", + account, + ); } let initial_validators_0 = T::SessionManager::new_session(0) @@ -746,10 +750,10 @@ impl Module { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; - frame_system::Module::::inc_consumers(&account).map_err(|_| Error::::NoAccount)?; + frame_system::Pallet::::inc_consumers(&account).map_err(|_| Error::::NoAccount)?; let old_keys = Self::inner_set_keys(&who, keys)?; if old_keys.is_some() { - let _ = frame_system::Module::::dec_consumers(&account); + let _ = frame_system::Pallet::::dec_consumers(&account); // ^^^ Defensive only; Consumers were incremented just before, so should never fail. 
} @@ -798,7 +802,7 @@ impl Module { let key_data = old_keys.get_raw(*id); Self::clear_key_owner(*id, key_data); } - frame_system::Module::::dec_consumers(&account); + frame_system::Pallet::::dec_consumers(&account); Ok(()) } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 73499bf739b87..b64359fccee32 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -78,9 +78,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - Historical: pallet_session_historical::{Module}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Historical: pallet_session_historical::{Pallet}, } ); @@ -91,8 +91,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } ); @@ -210,11 +210,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { - frame_system::Module::::inc_providers(k); + frame_system::Pallet::::inc_providers(k); } - frame_system::Module::::inc_providers(&4); + frame_system::Pallet::::inc_providers(&4); // An additional identity that we use. 
- frame_system::Module::::inc_providers(&69); + frame_system::Pallet::::inc_providers(&69); }); pallet_session::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 66d89d67dd6e7..64caf328002af 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -17,8 +17,8 @@ //! # Society Module //! -//! - [`society::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -272,7 +272,7 @@ type BalanceOf = <>::Currency as Currency< = <::Currency as Currency<::AccountId>>::NegativeImbalance; /// The module's configuration trait. -pub trait Config: system::Config { +pub trait Config: system::Config { /// The overarching event type. type Event: From> + Into<::Event>; @@ -316,6 +316,9 @@ pub trait Config: system::Config { /// The number of blocks between membership challenges. type ChallengePeriod: Get; + + /// The maximum number of candidates that we accept per round. + type MaxCandidateIntake: Get; } /// A vote by a member on a candidate application. @@ -497,6 +500,9 @@ decl_module! { /// The societies's module id const ModuleId: ModuleId = T::ModuleId::get(); + /// Maximum candidate intake per round. + const MaxCandidateIntake: u32 = T::MaxCandidateIntake::get(); + // Used for handling module events. fn deposit_event() = default; @@ -584,7 +590,8 @@ decl_module! { // no reason that either should fail. match b.remove(pos).kind { BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&who, deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); } BidKind::Vouch(voucher, _) => { >::remove(&voucher); @@ -793,7 +800,7 @@ decl_module! 
{ let mut payouts = >::get(&who); if let Some((when, amount)) = payouts.first() { - if when <= &>::block_number() { + if when <= &>::block_number() { T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; payouts.remove(0); if payouts.is_empty() { @@ -981,7 +988,7 @@ decl_module! { // Reduce next pot by payout >::put(pot - value); // Add payout for new candidate - let maturity = >::block_number() + let maturity = >::block_number() + Self::lock_duration(Self::members().len() as u32); Self::pay_accepted_candidate(&who, value, kind, maturity); } @@ -1235,7 +1242,8 @@ impl, I: Instance> Module { let Bid { who: popped, kind, .. } = bids.pop().expect("b.len() > 1000; qed"); match kind { BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&popped, deposit); + let err_amount = T::Currency::unreserve(&popped, deposit); + debug_assert!(err_amount.is_zero()); } BidKind::Vouch(voucher, _) => { >::remove(&voucher); @@ -1324,7 +1332,7 @@ impl, I: Instance> Module { // critical issues or side-effects. This is auto-correcting as members fall out of society. members.reserve(candidates.len()); - let maturity = >::block_number() + let maturity = >::block_number() + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); @@ -1402,7 +1410,8 @@ impl, I: Instance> Module { Self::bump_payout(winner, maturity, total_slash); } else { // Move the slashed amount back from payouts account to local treasury. - let _ = T::Currency::transfer(&Self::payouts(), &Self::account_id(), total_slash, AllowDeath); + let res = T::Currency::transfer(&Self::payouts(), &Self::account_id(), total_slash, AllowDeath); + debug_assert!(res.is_ok()); } } @@ -1413,7 +1422,8 @@ impl, I: Instance> Module { // this should never fail since we ensure we can afford the payouts in a previous // block, but there's not much we can do to recover if it fails anyway. 
- let _ = T::Currency::transfer(&Self::account_id(), &Self::payouts(), total_payouts, AllowDeath); + let res = T::Currency::transfer(&Self::account_id(), &Self::payouts(), total_payouts, AllowDeath); + debug_assert!(res.is_ok()); } // if at least one candidate was accepted... @@ -1514,7 +1524,8 @@ impl, I: Instance> Module { BidKind::Deposit(deposit) => { // In the case that a normal deposit bid is accepted we unreserve // the deposit. - let _ = T::Currency::unreserve(candidate, deposit); + let err_amount = T::Currency::unreserve(candidate, deposit); + debug_assert!(err_amount.is_zero()); value } BidKind::Vouch(voucher, tip) => { @@ -1615,11 +1626,11 @@ impl, I: Instance> Module { /// May be empty. pub fn take_selected( members_len: usize, - pot: BalanceOf + pot: BalanceOf, ) -> Vec>> { let max_members = MaxMembers::::get() as usize; - // No more than 10 will be returned. - let mut max_selections: usize = 10.min(max_members.saturating_sub(members_len)); + let mut max_selections: usize = + (T::MaxCandidateIntake::get() as usize).min(max_members.saturating_sub(members_len)); if max_selections > 0 { // Get the number of left-most bidders whose bids add up to less than `pot`. diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 0a684b2a8dc89..ff80b50b6d358 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -41,9 +41,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Society: pallet_society::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Society: pallet_society::{Pallet, Call, Storage, Event, Config}, } ); @@ -57,6 +57,7 @@ parameter_types! 
{ pub const ChallengePeriod: u64 = 8; pub const BlockHashCount: u64 = 250; pub const ExistentialDeposit: u64 = 1; + pub const MaxCandidateIntake: u32 = 10; pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(1024); @@ -104,7 +105,7 @@ impl pallet_balances::Config for Test { impl Config for Test { type Event = Event; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type Randomness = TestRandomness; type CandidateDeposit = CandidateDeposit; type WrongSideDeduction = WrongSideDeduction; @@ -116,6 +117,7 @@ impl Config for Test { type FounderSetOrigin = EnsureSignedBy; type SuspensionJudgementOrigin = EnsureSignedBy; type ChallengePeriod = ChallengePeriod; + type MaxCandidateIntake = MaxCandidateIntake; type ModuleId = SocietyModuleId; } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 1f9f29570a223..908e361e667e3 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -17,8 +17,6 @@ static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -# TWO_PHASE_NOTE:: ideally we should be able to get rid of this. 
-sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } sp-io ={ version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } @@ -27,8 +25,9 @@ frame-system = { version = "3.0.0", default-features = false, path = "../system" pallet-session = { version = "3.0.0", default-features = false, features = ["historical"], path = "../session" } pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +frame-election-provider-support = { version = "3.0.0", default-features = false, path = "../election-provider-support" } log = { version = "0.4.14", default-features = false } -sp-election-providers = { version = "3.0.0", default-features = false, path = "../../primitives/election-providers" } +paste = "1.0" # Optional imports for benchmarking frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } @@ -43,7 +42,7 @@ pallet-timestamp = { version = "3.0.0", path = "../timestamp" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } -sp-election-providers = { version = "3.0.0", features = ["runtime-benchmarks"], path = "../../primitives/election-providers" } +frame-election-provider-support = { version = "3.0.0", features = ["runtime-benchmarks"], path = "../election-provider-support" } rand_chacha = { version = "0.2" } parking_lot = "0.11.1" hex = "0.4" @@ -54,7 +53,6 @@ std = [ "serde", "codec/std", "sp-std/std", - "sp-npos-elections/std", 
"sp-io/std", "frame-support/std", "sp-runtime/std", @@ -64,11 +62,11 @@ std = [ "pallet-authorship/std", "sp-application-crypto/std", "log/std", - "sp-election-providers/std", + "frame-election-provider-support/std", ] runtime-benchmarks = [ "frame-benchmarking", - "sp-election-providers/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", "rand_chacha", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/staking/fuzzer/.gitignore b/frame/staking/fuzzer/.gitignore deleted file mode 100644 index 3ebcb104d4a50..0000000000000 --- a/frame/staking/fuzzer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -hfuzz_target -hfuzz_workspace diff --git a/frame/staking/fuzzer/Cargo.lock b/frame/staking/fuzzer/Cargo.lock deleted file mode 100644 index e451e12d10131..0000000000000 --- a/frame/staking/fuzzer/Cargo.lock +++ /dev/null @@ -1,2294 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "ahash" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" -dependencies = [ - "const-random", -] - -[[package]] -name = "aho-corasick" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" -dependencies = [ - "memchr", -] - -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits", -] - -[[package]] -name = "approx" -version = "0.3.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arbitrary" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" - -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" - -[[package]] -name = "backtrace" -version = "0.3.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e" -dependencies = [ - "backtrace-sys", - "cfg-if", - "libc", - "rustc-demangle", -] - -[[package]] -name = "backtrace-sys" -version = "0.1.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8aba10a69c8e8d7622c5710229485ec32e9d55fdad160ea559c086fdcd118" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "base58" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitmask" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" - -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "bumpalo" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" - -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - -[[package]] -name = 
"byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "cc" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "clear_on_drop" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" -dependencies = [ - "cc", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - -[[package]] -name = "const-random" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" -dependencies = [ - "getrandom", - "proc-macro-hack", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = 
"crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array", - "subtle 1.0.0", -] - -[[package]] -name = "curve25519-dalek" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" -dependencies = [ - "byteorder", - "digest", - "rand_core 0.5.1", - "subtle 2.2.2", - "zeroize", -] - -[[package]] -name = "derive_more" -version = "0.99.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.0-pre.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" -dependencies = [ - "clear_on_drop", - "curve25519-dalek", - "rand 0.7.3", - "sha2", -] - -[[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" - -[[package]] -name = "environmental" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" - -[[package]] -name = "failure" -version = "0.1.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fixed-hash" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32529fc42e86ec06e5047092082aab9ad459b070c5d2a76b14f4f5ce70bf2e84" -dependencies = [ - "byteorder", - "rand 0.7.3", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "frame-benchmarking" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "linregress", - "parity-scale-codec", - "sp-api", - "sp-io", - "sp-runtime", - "sp-runtime-interface", - "sp-std", -] - -[[package]] -name = "frame-metadata" -version = "11.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "frame-support" -version = "2.0.0-alpha.5" -dependencies = [ - "bitmask", - "frame-metadata", - "frame-support-procedural", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec", - "paste", - "serde", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "tracing", -] - -[[package]] -name = "frame-support-procedural" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "2.0.0-alpha.5" -dependencies = [ - 
"frame-support-procedural-tools-derive", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "futures" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" - -[[package]] -name = "futures-executor" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" - 
-[[package]] -name = "futures-macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "getrandom" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" 
-dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" -dependencies = [ - "ahash", - "autocfg 0.1.7", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac", - "digest", -] - -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest", - "generic-array", - "hmac", -] - -[[package]] -name = "impl-codec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-serde" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-serde" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" - -[[package]] -name = "js-sys" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" - -[[package]] -name = "libfuzzer-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d718794b8e23533b9069bd2c4597d69e41cc7ab1c02700a502971aca0cdcf24" -dependencies = [ - "arbitrary", - "cc", -] - -[[package]] -name = "libm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" - -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest", - "hmac-drbg", - "rand 0.7.3", - "sha2", - "subtle 2.2.2", - "typenum", -] - -[[package]] -name = "linregress" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" -dependencies = [ - "failure", - "nalgebra", - "statrs", -] - -[[package]] -name = "lock_api" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "matrixmultiply" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4f7ec66360130972f34830bfad9ef05c6610a43938a467bcc9ab9369ab3478f" -dependencies = [ - "rawpointer", -] - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" - -[[package]] -name = "memory-db" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58381b20ebe2c578e75dececd9da411414903415349548ccc46aac3209cdfbc" -dependencies = [ - "ahash", - "hash-db", - "hashbrown", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "nalgebra" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" -dependencies = [ - "alga", - "approx", - "generic-array", - "matrixmultiply", - "num-complex", - "num-rational", - "num-traits", - "rand 0.6.5", - "typenum", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg 1.0.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg 1.0.0", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" -dependencies = [ - "autocfg 1.0.0", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg 1.0.0", - 
"num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" -dependencies = [ - "autocfg 1.0.0", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" -dependencies = [ - "parking_lot 0.9.0", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "pallet-authorship" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-authorship", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-balances" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-indices" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-session" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "pallet-timestamp", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-staking", - "sp-std", - "sp-trie", -] - 
-[[package]] -name = "pallet-staking" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "pallet-authorship", - "pallet-indices", - "pallet-session", - "parity-scale-codec", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-core", - "sp-io", - "sp-npos-elections", - "sp-runtime", - "sp-staking", - "sp-std", - "static_assertions", -] - -[[package]] -name = "pallet-staking-fuzz" -version = "0.0.0" -dependencies = [ - "frame-support", - "frame-system", - "libfuzzer-sys", - "pallet-balances", - "pallet-indices", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", - "parity-scale-codec", - "rand 0.7.3", - "sp-core", - "sp-io", - "sp-npos-elections", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-staking-reward-curve" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pallet-timestamp" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "parity-scale-codec" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" -dependencies = [ - "arrayvec 0.5.1", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" -dependencies = [ - "cfg-if", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.10.0", - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - "lock_api", - "parking_lot_core 0.6.2", - "rustc_version", -] - -[[package]] -name = "parking_lot" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" -dependencies = [ - "lock_api", - "parking_lot_core 0.7.0", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -dependencies = [ - "cfg-if", - "cloudabi", - "libc", - "redox_syscall", - "rustc_version", - "smallvec 0.6.13", - "winapi", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" -dependencies = [ - "cfg-if", - "cloudabi", - "libc", - "redox_syscall", - "smallvec 1.2.0", - "winapi", -] - -[[package]] -name = "paste" -version = "0.1.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "092d791bf7847f70bbd49085489fba25fc2c193571752bff9e36e74e72403932" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste-impl" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406c23fb4c45cc6f68a9bbabb8ec7bd6f8cfcbd17e9e8f72c2460282f8325729" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pbkdf2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = [ - "byteorder", - "crypto-mac", -] - -[[package]] -name = "pin-utils" -version = "0.1.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" - -[[package]] -name = "ppv-lite86" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" - -[[package]] -name = "primitive-types" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e4b9943a2da369aec5e96f7c10ebc74fcf434d39590d974b0a3460e6f67fbb" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde 0.3.0", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfdefadc3d57ca21cf17990a28ef4c0f7c61383a28cb7604cf4a18e6ede1420" - -[[package]] -name = "proc-macro-nested" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" - -[[package]] -name = "proc-macro2" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "rand" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "winapi", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", -] - 
-[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" - -[[package]] -name = "regex" -version = "1.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-syntax" -version = "0.6.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" - -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.1", - "curve25519-dalek", - "getrandom", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "sha2", - "subtle 2.2.2", - "zeroize", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "send_wrapper" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" - 
-[[package]] -name = "serde" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sha2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" -dependencies = [ - "block-buffer", - "digest", - "fake-simd", - "opaque-debug", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" - -[[package]] -name = "sp-api" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "parity-scale-codec", - "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-version", -] - -[[package]] -name = "sp-api-proc-macro" -version = "2.0.0-alpha.5" -dependencies = [ - "blake2-rfc", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-application-crypto" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-arithmetic" 
-version = "2.0.0-alpha.5" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-authorship" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-core" -version = "2.0.0-alpha.5" -dependencies = [ - "base58", - "blake2-rfc", - "byteorder", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde 0.3.0", - "lazy_static", - "libsecp256k1", - "log", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.10.0", - "primitive-types", - "rand 0.7.3", - "regex", - "schnorrkel", - "serde", - "sha2", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", - "substrate-bip39", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-debug-derive" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.8.0-alpha.5" -dependencies = [ - "environmental", - "sp-std", - "sp-storage", -] - -[[package]] -name = "sp-inherents" -version = "2.0.0-alpha.5" -dependencies = [ - "derive_more", - "parity-scale-codec", - "parking_lot 0.10.0", - "sp-core", - "sp-std", -] - -[[package]] -name = "sp-io" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "sp-core", - "sp-externalities", - "sp-runtime-interface", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-wasm-interface", -] - -[[package]] -name = "sp-keyring" -version = "2.0.0-alpha.5" -dependencies = [ - "lazy_static", - "sp-core", - "sp-runtime", - "strum", -] - -[[package]] -name = "sp-panic-handler" -version = "2.0.0-alpha.5" -dependencies = [ - "backtrace", - "log", -] - -[[package]] -name = "sp-npos-elections" -version = "2.0.0-alpha.5" -dependencies = [ - 
"parity-scale-codec", - "serde", - "sp-npos-elections-compact", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-npos-elections-compact" -version = "2.0.0-rc3" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-runtime" -version = "2.0.0-alpha.5" -dependencies = [ - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-runtime-interface" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "2.0.0-alpha.5" -dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-staking" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-state-machine" -version = "0.8.0-alpha.5" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.10.0", - "rand 0.7.3", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-trie", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "2.0.0-alpha.5" - -[[package]] -name = "sp-storage" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-serde 0.2.3", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-timestamp" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", - "wasm-timer", -] - -[[package]] -name = "sp-trie" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "sp-core", 
- "sp-std", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-version" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-serde 0.2.3", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-wasm-interface" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-std", - "wasmi", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" -dependencies = [ - "rand 0.5.6", -] - -[[package]] -name = "strum" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "substrate-bip39" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c004e8166d6e0aa3a9d5fa673e5b7098ff25f930de1013a21341988151e681bb" -dependencies = [ - "hmac", - "pbkdf2", - "schnorrkel", - "sha2", -] - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" - 
-[[package]] -name = "syn" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tiny-bip39" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" -dependencies = [ - "failure", - "hmac", - "once_cell", - "pbkdf2", - "rand 0.7.3", - "rustc-hash", - "sha2", - "unicode-normalization", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2953ca5148619bc99695c1274cb54c5275bbb913c6adad87e72eaf8db9787f69" -dependencies = [ - "crunchy", -] - -[[package]] -name = "toml" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab" -dependencies = [ - "cfg-if", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "trie-db" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" -dependencies = [ - "hash-db", - "hashbrown", - "log", - "rustc-hex", - "smallvec 1.2.0", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] -name = "twox-hash" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "typenum" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" - -[[package]] -name = "uint" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" -dependencies = [ - "byteorder", - "crunchy", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -dependencies = [ - "smallvec 1.2.0", -] - -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" - -[[package]] -name = "unicode-xid" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasm-bindgen" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = 
"wasm-bindgen-shared" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" - -[[package]] -name = "wasm-timer" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" -dependencies = [ - "futures", - "js-sys", - "parking_lot 0.9.0", - "pin-utils", - "send_wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wasmi" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" -dependencies = [ - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "web-sys" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "zeroize" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml deleted file mode 100644 index 84758c6bf65ce..0000000000000 --- a/frame/staking/fuzzer/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "pallet-staking-fuzz" -version = "0.0.0" -authors = ["Automatically generated"] -publish = false -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME pallet staking fuzzing" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -honggfuzz = "0.5" -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -pallet-staking = { version = "3.0.0", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "3.0.0", path = "../reward-curve" } -pallet-session = { version = "3.0.0", path = "../../session" } -pallet-indices = { version = "3.0.0", path = "../../indices" } -pallet-balances = { version = "3.0.0", path = "../../balances" } -pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } -frame-system = { version = "3.0.0", path = "../../system" } -frame-support = { version = "3.0.0", path = "../../support" } -sp-std = { version = "3.0.0", path = "../../../primitives/std" } -sp-io ={ version = "3.0.0", path = 
"../../../primitives/io" } -sp-core = { version = "3.0.0", path = "../../../primitives/core" } -sp-npos-elections = { version = "3.0.0", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-election-providers = { version = "3.0.0", path = "../../../primitives/election-providers" } -serde = "1.0.101" - -[features] -# Note feature std is required so that impl_opaque_keys derive serde. -default = ["std"] -std = [] - -[[bin]] -name = "submit_solution" -path = "src/submit_solution.rs" diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 05d001d23858e..8fe7975cef068 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -33,11 +33,11 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Indices: pallet_indices::{Module, Call, Storage, Config, Event}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event, ValidateUnsigned}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } ); @@ -158,18 +158,23 @@ where } pub struct MockElectionProvider; -impl sp_election_providers::ElectionProvider for MockElectionProvider { +impl frame_election_provider_support::ElectionProvider + for MockElectionProvider +{ type Error = (); type DataProvider = pallet_staking::Module; - fn elect() -> Result, Self::Error> { + fn elect() -> Result< + (sp_npos_elections::Supports, frame_support::weights::Weight), + 
Self::Error + > { Err(()) } } impl pallet_staking::Config for Test { type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); type Event = Event; @@ -180,7 +185,7 @@ impl pallet_staking::Config for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type ElectionLookahead = (); type Call = Call; diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs deleted file mode 100644 index 63ec189d44b07..0000000000000 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ /dev/null @@ -1,183 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Fuzzing for staking pallet. -//! -//! 
HFUZZ_RUN_ARGS="-n 8" cargo hfuzz run submit_solution - -use honggfuzz::fuzz; - -use mock::Test; -use pallet_staking::testing_utils::*; -use frame_support::{assert_ok, storage::StorageValue, traits::UnfilteredDispatchable}; -use frame_system::RawOrigin; -use sp_runtime::DispatchError; -use sp_core::offchain::{testing::TestOffchainExt, OffchainWorkerExt, OffchainDbExt}; -use pallet_staking::{EraElectionStatus, ElectionStatus, Module as Staking, Call as StakingCall}; - -mod mock; - -#[repr(u32)] -#[allow(dead_code)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum Mode { - /// Initial submission. This will be rather cheap. - InitialSubmission, - /// A better submission that will replace the previous ones. This is the most expensive. - StrongerSubmission, - /// A weak submission that will be rejected. This will be rather cheap. - WeakerSubmission, -} - -pub fn new_test_ext(iterations: u32) -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default() - .build_storage::() - .map(Into::into) - .expect("Failed to create test externalities."); - - let (offchain, offchain_state) = TestOffchainExt::new(); - - let mut seed = [0u8; 32]; - seed[0..4].copy_from_slice(&iterations.to_le_bytes()); - offchain_state.write().seed = seed; - - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - - ext -} - -fn main() { - let to_range = |x: u32, a: u32, b: u32| { - let collapsed = x % b; - if collapsed >= a { - collapsed - } else { - collapsed + a - } - }; - loop { - fuzz!(|data: (u32, u32, u32, u32, u32)| { - let (mut num_validators, mut num_nominators, mut edge_per_voter, mut to_elect, mode_u32) = data; - // always run with 5 iterations. 
- let mut ext = new_test_ext(5); - let mode: Mode = unsafe { std::mem::transmute(mode_u32) }; - num_validators = to_range(num_validators, 50, 1000); - num_nominators = to_range(num_nominators, 50, 2000); - edge_per_voter = to_range(edge_per_voter, 1, 16); - to_elect = to_range(to_elect, 20, num_validators); - - let do_reduce = true; - - println!("+++ instance with params {} / {} / {} / {} / {:?}({})", - num_nominators, - num_validators, - edge_per_voter, - to_elect, - mode, - mode_u32, - ); - - ext.execute_with(|| { - // initial setup - init_active_era(); - - assert_ok!(create_validators_with_nominators_for_era::( - num_validators, - num_nominators, - edge_per_voter as usize, - true, - None, - )); - - >::put(ElectionStatus::Open(1)); - assert!(>::create_stakers_snapshot().0); - - let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); - - // stuff to submit - let (winners, compact, score, size) = match mode { - Mode::InitialSubmission => { - // No need to setup anything - get_seq_phragmen_solution::(do_reduce) - }, - Mode::StrongerSubmission => { - let (winners, compact, score, size) = get_weak_solution::(false); - println!("Weak on chain score = {:?}", score); - assert_ok!( - >::submit_election_solution( - origin.clone().into(), - winners, - compact, - score, - current_era::(), - size, - ) - ); - get_seq_phragmen_solution::(do_reduce) - }, - Mode::WeakerSubmission => { - let (winners, compact, score, size) = get_seq_phragmen_solution::(do_reduce); - println!("Strong on chain score = {:?}", score); - assert_ok!( - >::submit_election_solution( - origin.clone().into(), - winners, - compact, - score, - current_era::(), - size, - ) - ); - get_weak_solution::(false) - } - }; - - // must have chosen correct number of winners. 
- assert_eq!(winners.len() as u32, >::validator_count()); - - // final call and origin - let call = StakingCall::::submit_election_solution( - winners, - compact, - score, - current_era::(), - size, - ); - - // actually submit - match mode { - Mode::WeakerSubmission => { - assert_eq!( - call.dispatch_bypass_filter(origin.into()).unwrap_err().error, - DispatchError::Module { - index: 2, - error: 16, - message: Some("OffchainElectionWeakSubmission"), - }, - ); - }, - // NOTE: so exhaustive pattern doesn't work here.. maybe some rust issue? - // or due to `#[repr(u32)]`? - Mode::InitialSubmission | Mode::StrongerSubmission => { - assert_ok!(call.dispatch_bypass_filter(origin.into())); - } - }; - }) - }); - } -} diff --git a/frame/staking/reward-fn/Cargo.toml b/frame/staking/reward-fn/Cargo.toml new file mode 100644 index 0000000000000..15b17a5e716c3 --- /dev/null +++ b/frame/staking/reward-fn/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "pallet-staking-reward-fn" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Reward function for FRAME staking pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] + +[dependencies] +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../../primitives/arithmetic" } +log = { version = "0.4.14", default-features = false } + +[features] +default = ["std"] +std = [ + "sp-arithmetic/std", + "log/std", +] diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs new file mode 100644 index 0000000000000..205f0207673a3 --- /dev/null +++ b/frame/staking/reward-fn/src/lib.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Useful function for inflation for nominated proof of stake. + +use sp_arithmetic::{Perquintill, PerThing, biguint::BigUint, traits::{Zero, SaturatedConversion}}; +use core::convert::TryFrom; + +/// Compute yearly inflation using function +/// +/// ```ignore +/// I(x) = for x between 0 and x_ideal: x / x_ideal, +/// for x between x_ideal and 1: 2^((x_ideal - x) / d) +/// ``` +/// +/// where: +/// * x is the stake rate, i.e. fraction of total issued tokens that actively staked behind +/// validators. +/// * d is the falloff or `decay_rate` +/// * x_ideal: the ideal stake rate. +/// +/// The result is meant to be scaled with minimum inflation and maximum inflation. +/// +/// (as detailed +/// [here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model-with-parachains)) +/// +/// Arguments are: +/// * `stake`: +/// The fraction of total issued tokens that actively staked behind +/// validators. Known as `x` in the literature. +/// Must be between 0 and 1. +/// * `ideal_stake`: +/// The fraction of total issued tokens that should be actively staked behind +/// validators. Known as `x_ideal` in the literature. +/// Must be between 0 and 1. +/// * `falloff`: +/// Known as `decay_rate` in the literature. 
A co-efficient dictating the strength of +/// the global incentivization to get the `ideal_stake`. A higher number results in less typical +/// inflation at the cost of greater volatility for validators. +/// Must be more than 0.01. +pub fn compute_inflation( + stake: P, + ideal_stake: P, + falloff: P, +) -> P { + if stake < ideal_stake { + // ideal_stake is more than 0 because it is strictly more than stake + return stake / ideal_stake + } + + if falloff < P::from_percent(1.into()) { + log::error!("Invalid inflation computation: falloff less than 1% is not supported"); + return PerThing::zero() + } + + let accuracy = { + let mut a = BigUint::from(Into::::into(P::ACCURACY)); + a.lstrip(); + a + }; + + let mut falloff = BigUint::from(falloff.deconstruct().into()); + falloff.lstrip(); + + let ln2 = { + /// `ln(2)` expressed in as perquintillionth. + const LN2: u64 = 0_693_147_180_559_945_309; + let ln2 = P::from_rational(LN2.into(), Perquintill::ACCURACY.into()); + BigUint::from(ln2.deconstruct().into()) + }; + + // falloff is stripped above. + let ln2_div_d = div_by_stripped(ln2.mul(&accuracy), &falloff); + + let inpos_param = INPoSParam { + x_ideal: BigUint::from(ideal_stake.deconstruct().into()), + x: BigUint::from(stake.deconstruct().into()), + accuracy, + ln2_div_d, + }; + + let res = compute_taylor_serie_part(&inpos_param); + + match u128::try_from(res.clone()) { + Ok(res) if res <= Into::::into(P::ACCURACY) => { + P::from_parts(res.saturated_into()) + }, + // If result is beyond bounds there is nothing we can do + _ => { + log::error!("Invalid inflation computation: unexpected result {:?}", res); + P::zero() + }, + } +} + + +/// Internal struct holding parameter info alongside other cached value. +/// +/// All expressed in part from `accuracy` +struct INPoSParam { + ln2_div_d: BigUint, + x_ideal: BigUint, + x: BigUint, + /// Must be stripped and have no leading zeros. + accuracy: BigUint, +} + +/// Compute `2^((x_ideal - x) / d)` using taylor serie. 
+/// +/// x must be strictly more than x_ideal. +/// +/// result is expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { + // The last computed taylor term. + let mut last_taylor_term = p.accuracy.clone(); + + // Whereas taylor sum is positive. + let mut taylor_sum_positive = true; + + // The sum of all taylor term. + let mut taylor_sum = last_taylor_term.clone(); + + for k in 1..300 { + last_taylor_term = compute_taylor_term(k, &last_taylor_term, p); + + if last_taylor_term.is_zero() { + break + } + + let last_taylor_term_positive = k % 2 == 0; + + if taylor_sum_positive == last_taylor_term_positive { + taylor_sum = taylor_sum.add(&last_taylor_term); + } else { + if taylor_sum >= last_taylor_term { + taylor_sum = taylor_sum.sub(&last_taylor_term) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } else { + taylor_sum_positive = !taylor_sum_positive; + taylor_sum = last_taylor_term.clone().sub(&taylor_sum) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } + } + } + + if !taylor_sum_positive { + return BigUint::zero() + } + + taylor_sum.lstrip(); + taylor_sum +} + +/// Return the absolute value of k-th taylor term of `2^((x_ideal - x))/d` i.e. +/// `((x - x_ideal) * ln(2) / d)^k / k!` +/// +/// x must be strictly more x_ideal. +/// +/// We compute the term from the last term using this formula: +/// +/// `((x - x_ideal) * ln(2) / d)^k / k! 
== previous_term * (x - x_ideal) * ln(2) / d / k` +/// +/// `previous_taylor_term` and result are expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_term(k: u32, previous_taylor_term: &BigUint, p: &INPoSParam) -> BigUint { + let x_minus_x_ideal = p.x.clone().sub(&p.x_ideal) + // NOTE: Should never happen, as x must be more than x_ideal + .unwrap_or_else(|_| BigUint::zero()); + + let res = previous_taylor_term.clone() + .mul(&x_minus_x_ideal) + .mul(&p.ln2_div_d) + .div_unit(k); + + // p.accuracy is stripped by definition. + let res = div_by_stripped(res, &p.accuracy); + let mut res = div_by_stripped(res, &p.accuracy); + + res.lstrip(); + res +} + +/// Compute a div b. +/// +/// requires `b` to be stripped and have no leading zeros. +fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { + a.lstrip(); + + if b.len() == 0 { + log::error!("Computation error: Invalid division"); + return BigUint::zero() + } + + if b.len() == 1 { + return a.div_unit(b.checked_get(0).unwrap_or(1)) + } + + if b.len() > a.len() { + return BigUint::zero() + } + + if b.len() == a.len() { + // 100_000^2 is more than 2^32-1, thus `new_a` has more limbs than `b`. + let mut new_a = a.mul(&BigUint::from(100_000u64.pow(2))); + new_a.lstrip(); + + debug_assert!(new_a.len() > b.len()); + return new_a + .div(b, false) + .map(|res| res.0) + .unwrap_or_else(|| BigUint::zero()) + .div_unit(100_000) + .div_unit(100_000) + } + + a.div(b, false) + .map(|res| res.0) + .unwrap_or_else(|| BigUint::zero()) +} diff --git a/frame/staking/reward-fn/tests/test.rs b/frame/staking/reward-fn/tests/test.rs new file mode 100644 index 0000000000000..32daf9d09a76d --- /dev/null +++ b/frame/staking/reward-fn/tests/test.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_arithmetic::{PerThing, Perbill, PerU16, Percent, Perquintill}; + +/// This test the precision and panics if error too big error. +/// +/// error is asserted to be less or equal to 8/accuracy or 8*f64::EPSILON +fn test_precision(stake: P, ideal_stake: P, falloff: P) { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + let res = pallet_staking_reward_fn::compute_inflation(stake, ideal_stake, falloff); + let res = Into::::into(res.deconstruct()) as f64 / accuracy_f64; + + let expect = float_i_npos(stake, ideal_stake, falloff); + + let error = (res - expect).abs(); + + if error > 8f64 / accuracy_f64 && error > 8.0 * f64::EPSILON { + panic!( + "stake: {:?}, ideal_stake: {:?}, falloff: {:?}, res: {}, expect: {}", + stake, ideal_stake, falloff, res , expect + ); + } +} + +/// compute the inflation using floats +fn float_i_npos(stake: P, ideal_stake: P, falloff: P) -> f64 { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + + let ideal_stake = Into::::into(ideal_stake.deconstruct()) as f64 / accuracy_f64; + let stake = Into::::into(stake.deconstruct()) as f64 / accuracy_f64; + let falloff = Into::::into(falloff.deconstruct()) as f64 / accuracy_f64; + + let x_ideal = ideal_stake; + let x = stake; + let d = falloff; + + if x < x_ideal { + x / x_ideal + } else { + 2_f64.powf((x_ideal - x) / d) + } +} + +#[test] +fn test_precision_for_minimum_falloff() { + fn 
test_falloff_precision_for_minimum_falloff() { + for stake in 0..1_000 { + let stake = P::from_rational(stake, 1_000); + let ideal_stake = P::zero(); + let falloff = P::from_rational(1, 100); + test_precision(stake, ideal_stake, falloff); + } + } + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); +} + +#[test] +fn compute_inflation_works() { + fn compute_inflation_works() { + for stake in 0..100 { + for ideal_stake in 0..10 { + for falloff in 1..10 { + let stake = P::from_rational(stake, 100); + let ideal_stake = P::from_rational(ideal_stake, 10); + let falloff = P::from_rational(falloff, 100); + test_precision(stake, ideal_stake, falloff); + } + } + } + } + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); +} diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index ecaa9889b5fb2..1d8a5c1fd6451 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -21,16 +21,12 @@ use super::*; use crate::Module as Staking; use testing_utils::*; -use sp_npos_elections::CompactSolution; use sp_runtime::traits::One; use frame_system::RawOrigin; pub use frame_benchmarking::{ - benchmarks, - account, - whitelisted_caller, - whitelist_account, - impl_benchmark_test_suite, + benchmarks, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite, }; + const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; @@ -97,8 +93,8 @@ pub fn create_validator_with_nominators( // Start a new Era let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); - assert!(new_validators.len() == 1); - assert!(new_validators[0] == v_stash, "Our validator was not selected!"); + assert_eq!(new_validators.len(), 1); + assert_eq!(new_validators[0], v_stash, "Our 
validator was not selected!"); // Give Era Points let reward = EraRewardPoints:: { @@ -208,16 +204,20 @@ benchmarks! { kick { // scenario: we want to kick `k` nominators from nominating us (we are a validator). // we'll assume that `k` is under 128 for the purposes of determining the slope. - // each nominator should have `MAX_NOMINATIONS` validators nominated, and our validator + // each nominator should have `T::MAX_NOMINATIONS` validators nominated, and our validator // should be somewhere in there. let k in 1 .. 128; - // these are the other validators; there are `MAX_NOMINATIONS - 1` of them, so there are a - // total of `MAX_NOMINATIONS` validators in the system. - let rest_of_validators = create_validators::(MAX_NOMINATIONS as u32 - 1, 100)?; + // these are the other validators; there are `T::MAX_NOMINATIONS - 1` of them, so + // there are a total of `T::MAX_NOMINATIONS` validators in the system. + let rest_of_validators = create_validators::(T::MAX_NOMINATIONS - 1, 100)?; // this is the validator that will be kicking. - let (stash, controller) = create_stash_controller::(MAX_NOMINATIONS as u32 - 1, 100, Default::default())?; + let (stash, controller) = create_stash_controller::( + T::MAX_NOMINATIONS - 1, + 100, + Default::default(), + )?; let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); // they start validating. @@ -228,7 +228,11 @@ benchmarks! { let mut nominator_stashes = Vec::with_capacity(k as usize); for i in 0 .. k { // create a nominator stash. - let (n_stash, n_controller) = create_stash_controller::(MAX_NOMINATIONS as u32 + i, 100, Default::default())?; + let (n_stash, n_controller) = create_stash_controller::( + T::MAX_NOMINATIONS + i, + 100, + Default::default(), + )?; // bake the nominations; we first clone them from the rest of the validators. let mut nominations = rest_of_validators.clone(); @@ -260,9 +264,9 @@ benchmarks! 
{ } } - // Worst case scenario, MAX_NOMINATIONS + // Worst case scenario, T::MAX_NOMINATIONS nominate { - let n in 1 .. MAX_NOMINATIONS as u32; + let n in 1 .. T::MAX_NOMINATIONS; let (stash, controller) = create_stash_controller::(n + 1, 100, Default::default())?; let validators = create_validators::(n, 100)?; whitelist_account!(controller); @@ -471,7 +475,13 @@ benchmarks! { let v in 1 .. 10; let n in 1 .. 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + )?; let session_index = SessionIndex::one(); }: { let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; @@ -482,7 +492,13 @@ benchmarks! { payout_all { let v in 1 .. 10; let n in 1 .. 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + )?; // Start a new Era let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); assert!(new_validators.len() == v as usize); @@ -544,229 +560,37 @@ benchmarks! { assert!(balance_before > balance_after); } - // This benchmark create `v` validators intent, `n` nominators intent, in total creating `e` - // edges. - #[extra] - submit_solution_initial { - // number of validator intention. This will be equal to `ElectionSize::validators`. - let v in 200 .. 400; - // number of nominator intention. This will be equal to `ElectionSize::nominators`. - let n in 500 .. 1000; - // number of assignments. Basically, number of active nominators. This will be equal to - // `compact.len()`. - let a in 200 .. 400; - // number of winners, also ValidatorCount. This will be equal to `winner.len()`. - let w in 16 .. 
100; - - ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); - - let winners = create_validators_with_nominators_for_era::( - v, - n, - MAX_NOMINATIONS, - false, - Some(w), - )?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // set number of winners - ValidatorCount::put(w); - - // create a assignments in total for the w winners. - let (winners, assignments) = create_assignments_for_offchain::(a, winners)?; - - let ( - winners, - compact, - score, - size - ) = offchain_election::prepare_submission::( - assignments, - winners, - false, - T::BlockWeights::get().max_block, - ).unwrap(); - - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); - - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - }: { - let result = >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ); - assert!(result.is_ok()); - } - verify { - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); - } - - // same as submit_solution_initial but we place a very weak solution on chian first. - submit_solution_better { + get_npos_voters { // number of validator intention. let v in 200 .. 400; // number of nominator intention. - let n in 500 .. 1000; - // number of assignments. Basically, number of active nominators. - let a in 200 .. 400; - // number of winners, also ValidatorCount. - let w in 16 .. 100; + let n in 200 .. 400; + // total number of slashing spans. Assigned to validators randomly. + let s in 1 .. 
20; - ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); - - let winners = create_validators_with_nominators_for_era::( - v, - n, - MAX_NOMINATIONS, - false, - Some(w), - )?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // set number of winners - ValidatorCount::put(w); - - // create a assignments in total for the w winners. - let (winners, assignments) = create_assignments_for_offchain::(a, winners)?; - - let single_winner = winners[0].0.clone(); - - let ( - winners, - compact, - score, - size - ) = offchain_election::prepare_submission::( - assignments, - winners, - false, - T::BlockWeights::get().max_block, - ).unwrap(); - - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); + let validators = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)? + .into_iter() + .map(|v| T::Lookup::lookup(v).unwrap()) + .collect::>(); - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - - // submit a very bad solution on-chain - { - // this is needed to fool the chain to accept this solution. - ValidatorCount::put(1); - let (winners, compact, score, size) = get_single_winner_solution::(single_winner)?; - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_ok()); - - // new solution has been accepted. 
- assert_eq!(>::queued_score().unwrap(), score); - ValidatorCount::put(w); - } + (0..s).for_each(|index| { + add_slashing_spans::(&validators[index as usize], 10); + }); }: { - let result = >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ); - assert!(result.is_ok()); - } - verify { - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); + let voters = >::get_npos_voters(); + assert_eq!(voters.len() as u32, v + n); } - // This will be early rejected based on the score. - #[extra] - submit_solution_weaker { + get_npos_targets { // number of validator intention. let v in 200 .. 400; // number of nominator intention. - let n in 500 .. 1000; - - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - - // submit a seq-phragmen with all the good stuff on chain. - { - let (winners, compact, score, size) = get_seq_phragmen_solution::(true); - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_ok() - ); - - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); - } + let n = 500; - // prepare a bad solution. This will be very early rejected. 
- let (winners, compact, score, size) = get_weak_solution::(true); + let _ = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)?; }: { - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_err() - ); + let targets = >::get_npos_targets(); + assert_eq!(targets.len() as u32, v); } } @@ -782,8 +606,13 @@ mod tests { let v = 10; let n = 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None) - .unwrap(); + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + ).unwrap(); let count_validators = Validators::::iter().count(); let count_nominators = Nominators::::iter().count(); @@ -866,16 +695,6 @@ mod tests { assert_ok!(closure_to_benchmark()); }); } - - #[test] - #[ignore] - fn test_benchmarks_offchain() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - assert_ok!(test_benchmark_submit_solution_better::()); - assert_ok!(test_benchmark_submit_solution_weaker::()); - }); - } - } impl_benchmark_test_suite!( diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index bd9d1f8bbdb30..e5259543fd4ba 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -38,7 +38,7 @@ pub fn compute_total_payout( // Milliseconds per year for the Julian year (365.25 days). 
const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; - let portion = Perbill::from_rational_approximation(era_duration as u64, MILLISECONDS_PER_YEAR); + let portion = Perbill::from_rational(era_duration as u64, MILLISECONDS_PER_YEAR); let payout = portion * yearly_inflation.calculate_for_fraction_times_denominator( npos_token_staked, total_tokens.clone(), diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 05511be63bb07..c28dbc87bccdd 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -19,9 +19,9 @@ //! //! The Staking module is used to manage funds at stake by network maintainers. //! -//! - [`staking::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Overview //! @@ -65,16 +65,16 @@ //! is paired with an active **controller** account, which issues instructions on how they shall be //! used. //! -//! An account pair can become bonded using the [`bond`](./enum.Call.html#variant.bond) call. +//! An account pair can become bonded using the [`bond`](Call::bond) call. //! //! Stash accounts can change their associated controller using the -//! [`set_controller`](./enum.Call.html#variant.set_controller) call. +//! [`set_controller`](Call::set_controller) call. //! //! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` -//! and `Idle` (defined in [`StakerStatus`](./enum.StakerStatus.html)). There are three +//! and `Idle` (defined in [`StakerStatus`]). There are three //! corresponding instructions to change between roles, namely: -//! [`validate`](./enum.Call.html#variant.validate), -//! [`nominate`](./enum.Call.html#variant.nominate), and [`chill`](./enum.Call.html#variant.chill). +//! [`validate`](Call::validate), +//! [`nominate`](Call::nominate), and [`chill`](Call::chill). //! //! #### Validating //! @@ -86,7 +86,7 @@ //! by nominators and their votes. 
//! //! An account can become a validator candidate via the -//! [`validate`](./enum.Call.html#variant.validate) call. +//! [`validate`](Call::validate) call. //! //! #### Nomination //! @@ -98,7 +98,7 @@ //! the misbehaving/offline validators as much as possible, simply because the nominators will also //! lose funds if they vote poorly. //! -//! An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. +//! An account can become a nominator via the [`nominate`](Call::nominate) call. //! //! #### Rewards and Slash //! @@ -127,7 +127,7 @@ //! This means that if they are a nominator, they will not be considered as voters anymore and if //! they are validators, they will no longer be a candidate for the next election. //! -//! An account can step back via the [`chill`](enum.Call.html#variant.chill) call. +//! An account can step back via the [`chill`](Call::chill) call. //! //! ### Session managing //! @@ -175,7 +175,7 @@ //! ### Era payout //! //! The era payout is computed using yearly inflation curve defined at -//! [`T::RewardCurve`](./trait.Config.html#associatedtype.RewardCurve) as such: +//! [`Config::EraPayout`] as such: //! //! ```nocompile //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -186,7 +186,7 @@ //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout //! ``` //! The remaining reward is send to the configurable end-point -//! [`T::RewardRemainder`](./trait.Config.html#associatedtype.RewardRemainder). +//! [`Config::RewardRemainder`]. //! //! ### Reward Calculation //! @@ -198,29 +198,28 @@ //! //! Total reward is split among validators and their nominators depending on the number of points //! they received during the era. Points are added to a validator using -//! [`reward_by_ids`](./enum.Call.html#variant.reward_by_ids) or -//! [`reward_by_indices`](./enum.Call.html#variant.reward_by_indices). +//! 
[`reward_by_ids`](Module::reward_by_ids). //! -//! [`Module`](./struct.Module.html) implements -//! [`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward +//! [`Module`] implements +//! [`pallet_authorship::EventHandler`] to add reward //! points to block producer and block producer of referenced uncles. //! //! The validator and its nominator split their reward as following: //! //! The validator can declare an amount, named -//! [`commission`](./struct.ValidatorPrefs.html#structfield.commission), that does not get shared +//! [`commission`](ValidatorPrefs::commission), that does not get shared //! with the nominators at each reward payout through its -//! [`ValidatorPrefs`](./struct.ValidatorPrefs.html). This value gets deducted from the total reward +//! [`ValidatorPrefs`]. This value gets deducted from the total reward //! that is paid to the validator and its nominators. The remaining portion is split among the //! validator and all of the nominators that nominated the validator, proportional to the value //! staked behind this validator (_i.e._ dividing the -//! [`own`](./struct.Exposure.html#structfield.own) or -//! [`others`](./struct.Exposure.html#structfield.others) by -//! [`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](./struct.Exposure.html)). +//! [`own`](Exposure::own) or +//! [`others`](Exposure::others) by +//! [`total`](Exposure::total) in [`Exposure`]). //! //! All entities who receive a reward have the option to choose their reward destination through the -//! [`Payee`](./struct.Payee.html) storage item (see -//! [`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: +//! [`Payee`] storage item (see +//! [`set_payee`](Call::set_payee)), to be one of the following: //! //! - Controller account, (obviously) not increasing the staked value. //! - Stash account, not increasing the staked value. @@ -231,15 +230,15 @@ //! 
Any funds already placed into stash can be the target of the following operations: //! //! The controller account can free a portion (or all) of the funds using the -//! [`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately +//! [`unbond`](Call::unbond) call. Note that the funds are not immediately //! accessible. Instead, a duration denoted by -//! [`BondingDuration`](./trait.Config.html#associatedtype.BondingDuration) (in number of eras) must +//! [`Config::BondingDuration`] (in number of eras) must //! pass until the funds can actually be removed. Once the `BondingDuration` is over, the -//! [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) call can be used to actually +//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually //! withdraw the funds. //! //! Note that there is a limitation to the number of fund-chunks that can be scheduled to be -//! unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum +//! unlocked in the future via [`unbond`](Call::unbond). In case this maximum //! (`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful //! call to `withdraw_unbonded` to remove some of the chunks. //! @@ -256,7 +255,7 @@ //! //! ## GenesisConfig //! -//! The Staking module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). The +//! The Staking module depends on the [`GenesisConfig`]. The //! `GenesisConfig` is optional and allow to set some initial stakers. //! //! 
## Related Modules @@ -278,7 +277,6 @@ pub mod testing_utils; pub mod benchmarking; pub mod slashing; -pub mod offchain_election; pub mod inflation; pub mod weights; @@ -286,34 +284,29 @@ use sp_std::{ result, prelude::*, collections::btree_map::BTreeMap, - convert::{TryInto, From}, - mem::size_of, + convert::From, }; use codec::{HasCompact, Encode, Decode}; use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, - weights::{Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, - storage::IterableStorageMap, - dispatch::{ - DispatchResult, DispatchResultWithPostInfo, DispatchErrorWithPostInfo, - WithPostDispatchInfo, + weights::{ + Weight, WithPostDispatchInfo, + constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, }, + storage::IterableStorageMap, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{ Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, - UnixTime, EstimateNextNewSession, EnsureOrigin, CurrencyToVote, IsSubType, - } + UnixTime, EstimateNextNewSession, EnsureOrigin, CurrencyToVote, + }, }; use pallet_session::historical; use sp_runtime::{ - Percent, Perbill, PerU16, RuntimeDebug, DispatchError, + Percent, Perbill, RuntimeDebug, DispatchError, curve::PiecewiseLinear, traits::{ Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, - AtLeast32BitUnsigned, Dispatchable, - }, - transaction_validity::{ - TransactionValidityError, TransactionValidity, ValidTransaction, InvalidTransaction, - TransactionSource, TransactionPriority, + AtLeast32BitUnsigned, }, }; use sp_staking::{ @@ -323,15 +316,10 @@ use sp_staking::{ #[cfg(feature = "std")] use sp_runtime::{Serialize, Deserialize}; use frame_system::{ - self as system, ensure_signed, ensure_root, ensure_none, + self as system, ensure_signed, ensure_root, offchain::SendTransactionTypes, }; -use sp_npos_elections::{ - ExtendedBalance, Assignment, ElectionScore, ElectionResult as PrimitiveElectionResult, - 
to_supports, EvaluateSupport, seq_phragmen, generate_solution_type, is_score_better, Supports, - VoteWeight, CompactSolution, PerThing128, -}; -use sp_election_providers::ElectionProvider; +use frame_election_provider_support::{ElectionProvider, VoteWeight, Supports, data_provider}; pub use weights::WeightInfo; const STAKING_ID: LockIdentifier = *b"staking "; @@ -343,29 +331,11 @@ macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { log::$level!( target: crate::LOG_TARGET, - concat!("💸 ", $patter) $(, $values)* + concat!("[{:?}] 💸 ", $patter), >::block_number() $(, $values)* ) }; } -/// Data type used to index nominators in the compact type -pub type NominatorIndex = u32; - -/// Data type used to index validators in the compact type. -pub type ValidatorIndex = u16; - -// Ensure the size of both ValidatorIndex and NominatorIndex. They both need to be well below usize. -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); - -/// Maximum number of stakers that can be stored in a snapshot. -pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize; -pub(crate) const MAX_NOMINATORS: usize = NominatorIndex::max_value() as usize; -pub const MAX_NOMINATIONS: usize = - ::LIMIT; - pub const MAX_UNLOCKING_CHUNKS: usize = 32; /// Counter for the number of eras that have passed. @@ -374,18 +344,6 @@ pub type EraIndex = u32; /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; -// Note: Maximum nomination limit is set here -- 16. -generate_solution_type!( - #[compact] - pub struct CompactAssignments::(16) -); - -/// Accuracy used for on-chain election. -pub type ChainAccuracy = Perbill; - -/// Accuracy used for off-chain election. This better be small. 
-pub type OffchainAccuracy = PerU16; - /// The balance type of this module. pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -670,78 +628,6 @@ pub struct UnappliedSlash { payout: Balance, } -/// Indicate how an election round was computed. -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub enum ElectionCompute { - /// Result was forcefully computed on chain at the end of the session. - OnChain, - /// Result was submitted and accepted to the chain via a signed transaction. - Signed, - /// Result was submitted and accepted to the chain via an unsigned transaction (by an - /// authority). - Unsigned, -} - -/// The result of an election round. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -pub struct ElectionResult { - /// Flat list of validators who have been elected. - elected_stashes: Vec, - /// Flat list of new exposures, to be updated in the [`Exposure`] storage. - exposures: Vec<(AccountId, Exposure)>, - /// Type of the result. This is kept on chain only to track and report the best score's - /// submission type. An optimisation could remove this. - compute: ElectionCompute, -} - -/// The status of the upcoming (offchain) election. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -pub enum ElectionStatus { - /// Nothing has and will happen for now. submission window is not open. - Closed, - /// The submission window has been open since the contained block number. - Open(BlockNumber), -} - -/// Some indications about the size of the election. This must be submitted with the solution. -/// -/// Note that these values must reflect the __total__ number, not only those that are present in the -/// solution. In short, these should be the same size as the size of the values dumped in -/// `SnapshotValidators` and `SnapshotNominators`. 
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, Default)] -pub struct ElectionSize { - /// Number of validators in the snapshot of the current election round. - #[codec(compact)] - pub validators: ValidatorIndex, - /// Number of nominators in the snapshot of the current election round. - #[codec(compact)] - pub nominators: NominatorIndex, -} - - -impl ElectionStatus { - pub fn is_open_at(&self, n: BlockNumber) -> bool { - *self == Self::Open(n) - } - - pub fn is_closed(&self) -> bool { - match self { - Self::Closed => true, - _ => false - } - } - - pub fn is_open(&self) -> bool { - !self.is_closed() - } -} - -impl Default for ElectionStatus { - fn default() -> Self { - Self::Closed - } -} - /// Means for interacting with a specialized version of the `session` trait. /// /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` @@ -782,6 +668,53 @@ impl SessionInterface<::AccountId> for T w } } +/// Handler for determining how much of a balance should be paid out on the current era. +pub trait EraPayout { + /// Determine the payout for this era. + /// + /// Returns the amount to be paid to stakers in this era, as well as whatever else should be + /// paid out ("the rest"). + fn era_payout( + total_staked: Balance, + total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance); +} + +impl EraPayout for () { + fn era_payout( + _total_staked: Balance, + _total_issuance: Balance, + _era_duration_millis: u64, + ) -> (Balance, Balance) { + (Default::default(), Default::default()) + } +} + +/// Adaptor to turn a `PiecewiseLinear` curve definition into an `EraPayout` impl, used for +/// backwards compatibility. 
+pub struct ConvertCurve(sp_std::marker::PhantomData); +impl< + Balance: AtLeast32BitUnsigned + Clone, + T: Get<&'static PiecewiseLinear<'static>>, +> EraPayout for ConvertCurve { + fn era_payout( + total_staked: Balance, + total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance) { + let (validator_payout, max_payout) = inflation::compute_total_payout( + &T::get(), + total_staked, + total_issuance, + // Duration of era; more than u64::MAX is rewarded as u64::MAX. + era_duration_millis, + ); + let rest = max_payout.saturating_sub(validator_payout.clone()); + (validator_payout, rest) + } +} + pub trait Config: frame_system::Config + SendTransactionTypes> { /// The staking balance. type Currency: LockableCurrency; @@ -800,13 +733,16 @@ pub trait Config: frame_system::Config + SendTransactionTypes> { type CurrencyToVote: CurrencyToVote>; /// Something that provides the election functionality. - type ElectionProvider: sp_election_providers::ElectionProvider< + type ElectionProvider: frame_election_provider_support::ElectionProvider< Self::AccountId, Self::BlockNumber, // we only accept an election provider that has staking as data provider. DataProvider = Module, >; + /// Maximum number of nominations per nominator. + const MAX_NOMINATIONS: u32; + /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). type RewardRemainder: OnUnbalanced>; @@ -838,52 +774,19 @@ pub trait Config: frame_system::Config + SendTransactionTypes> { /// Interface for interacting with a session module. type SessionInterface: self::SessionInterface; - /// The NPoS reward curve used to define yearly inflation. + /// The payout for validators and the system for the current era. /// See [Era payout](./index.html#era-payout). - type RewardCurve: Get<&'static PiecewiseLinear<'static>>; + type EraPayout: EraPayout>; /// Something that can estimate the next session change, accurately or as a best effort guess. 
type NextNewSession: EstimateNextNewSession; - /// The number of blocks before the end of the era from which election submissions are allowed. - /// - /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will - /// be used. - /// - /// This is bounded by being within the last session. Hence, setting it to a value more than the - /// length of a session will be pointless. - type ElectionLookahead: Get; - - /// The overarching call type. - type Call: Dispatchable + From> + IsSubType> + Clone; - - /// Maximum number of balancing iterations to run in the offchain submission. - /// - /// If set to 0, balance_solution will not be executed at all. - type MaxIterations: Get; - - /// The threshold of improvement that should be provided for a new solution to be accepted. - type MinSolutionScoreBump: Get; - /// The maximum number of nominators rewarded for each validator. /// /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim /// their reward. This used to limit the i/o cost for the nominator payout. type MaxNominatorRewardedPerValidator: Get; - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; - - /// Maximum weight that the unsigned transaction can have. - /// - /// Chose this value with care. On one hand, it should be as high as possible, so the solution - /// can contain as many nominators/validators as possible. On the other hand, it should be small - /// enough to fit in the block. - type OffchainSolutionWeightLimit: Get; - /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -917,12 +820,13 @@ enum Releases { V2_0_0, V3_0_0, V4_0_0, - V5_0_0, + V5_0_0, // blockable validators. + V6_0_0, // removal of all storage associated with offchain phragmen. 
} impl Default for Releases { fn default() -> Self { - Releases::V5_0_0 + Releases::V6_0_0 } } @@ -1087,47 +991,11 @@ decl_storage! { /// This is basically in sync with the call to [`SessionManager::new_session`]. pub CurrentPlannedSession get(fn current_planned_session): SessionIndex; - /// Snapshot of validators at the beginning of the current election window. This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. - pub SnapshotValidators get(fn snapshot_validators): Option>; - - /// Snapshot of nominators at the beginning of the current election window. This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. - pub SnapshotNominators get(fn snapshot_nominators): Option>; - - /// The next validator set. At the end of an era, if this is available (potentially from the - /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election - /// is executed. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. - pub QueuedElected get(fn queued_elected): Option>>; - - /// The score of the current [`QueuedElected`]. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. - pub QueuedScore get(fn queued_score): Option; - - /// Flag to control the execution of the offchain election. When `Open(_)`, we accept - /// solutions to be submitted. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. - pub EraElectionStatus get(fn era_election_status): ElectionStatus; - - /// True if the current **planned** session is final. Note that this does not take era - /// forcing into account. - /// - /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. 
- pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; - /// True if network has been upgraded to this version. /// Storage version of the pallet. /// - /// This is set to v5.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V5_0_0): Releases; + /// This is set to v6.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V6_0_0): Releases; } add_extra_genesis { config(stakers): @@ -1166,23 +1034,46 @@ decl_storage! { pub mod migrations { use super::*; - #[derive(Decode)] - struct OldValidatorPrefs { - #[codec(compact)] - pub commission: Perbill - } - impl OldValidatorPrefs { - fn upgraded(self) -> ValidatorPrefs { - ValidatorPrefs { - commission: self.commission, - .. Default::default() - } + pub mod v6 { + use super::*; + use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; + + // NOTE: value type doesn't matter, we just set it to () here. + generate_storage_alias!(Staking, SnapshotValidators => Value<()>); + generate_storage_alias!(Staking, SnapshotNominators => Value<()>); + generate_storage_alias!(Staking, QueuedElected => Value<()>); + generate_storage_alias!(Staking, QueuedScore => Value<()>); + generate_storage_alias!(Staking, EraElectionStatus => Value<()>); + generate_storage_alias!(Staking, IsCurrentSessionFinal => Value<()>); + + /// check to execute prior to migration. + pub fn pre_migrate() -> Result<(), &'static str> { + // these may or may not exist. + log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::exists()); + log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::exists()); + log!(info, "QueuedElected.exits()? {:?}", QueuedElected::exists()); + log!(info, "QueuedScore.exits()? {:?}", QueuedScore::exists()); + // these must exist. 
+ assert!(IsCurrentSessionFinal::exists(), "IsCurrentSessionFinal storage item not found!"); + assert!(EraElectionStatus::exists(), "EraElectionStatus storage item not found!"); + Ok(()) + } + + /// Migrate storage to v6. + pub fn migrate() -> Weight { + log!(info, "Migrating staking to Releases::V6_0_0"); + + SnapshotValidators::kill(); + SnapshotNominators::kill(); + QueuedElected::kill(); + QueuedScore::kill(); + EraElectionStatus::kill(); + IsCurrentSessionFinal::kill(); + + StorageVersion::put(Releases::V6_0_0); + log!(info, "Done."); + T::DbWeight::get().writes(6 + 1) } - } - pub fn migrate_to_blockable() -> frame_support::weights::Weight { - Validators::::translate::(|_, p| Some(p.upgraded())); - ErasValidatorPrefs::::translate::(|_, _, p| Some(p.upgraded())); - T::BlockWeights::get().max_block } } @@ -1200,10 +1091,8 @@ decl_event!( /// An old slashing report from a prior era was discarded because it could /// not be processed. \[session_index\] OldSlashingReportDiscarded(SessionIndex), - /// A new set of stakers was elected with the given \[compute\]. - StakingElection(ElectionCompute), - /// A new solution for the upcoming election has been stored. \[compute\] - SolutionStored(ElectionCompute), + /// A new set of stakers was elected. + StakingElection, /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, @@ -1252,37 +1141,6 @@ decl_error! { NotSortedAndUnique, /// Rewards for this era have already been claimed for this validator. AlreadyClaimed, - /// The submitted result is received out of the open window. - OffchainElectionEarlySubmission, - /// The submitted result is not as good as the one stored on chain. - OffchainElectionWeakSubmission, - /// The snapshot data of the current window is missing. - SnapshotUnavailable, - /// Incorrect number of winners were presented. 
- OffchainElectionBogusWinnerCount, - /// One of the submitted winners is not an active candidate on chain (index is out of range - /// in snapshot). - OffchainElectionBogusWinner, - /// Error while building the assignment type from the compact. This can happen if an index - /// is invalid, or if the weights _overflow_. - OffchainElectionBogusCompact, - /// One of the submitted nominators is not an active nominator on chain. - OffchainElectionBogusNominator, - /// One of the submitted nominators has an edge to which they have not voted on chain. - OffchainElectionBogusNomination, - /// One of the submitted nominators has an edge which is submitted before the last non-zero - /// slash of the target. - OffchainElectionSlashedNomination, - /// A self vote must only be originated from a validator to ONLY themselves. - OffchainElectionBogusSelfVote, - /// The submitted result has unknown edges that are not among the presented winners. - OffchainElectionBogusEdge, - /// The claimed score does not match with the one computed from the data. - OffchainElectionBogusScore, - /// The election size is invalid. - OffchainElectionBogusElectionSize, - /// The call is not allowed at the given time due to restrictions of election period. - CallNotAllowed, /// Incorrect previous history depth input provided. IncorrectHistoryDepth, /// Incorrect number of slashing spans provided. @@ -1311,110 +1169,30 @@ decl_module! { /// intervention. const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get(); - /// The number of blocks before the end of the era from which election submissions are allowed. - /// - /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will - /// be used. - /// - /// This is bounded by being within the last session. Hence, setting it to a value more than the - /// length of a session will be pointless. 
- const ElectionLookahead: T::BlockNumber = T::ElectionLookahead::get(); - - /// Maximum number of balancing iterations to run in the offchain submission. - /// - /// If set to 0, balance_solution will not be executed at all. - const MaxIterations: u32 = T::MaxIterations::get(); - - /// The threshold of improvement that should be provided for a new solution to be accepted. - const MinSolutionScoreBump: Perbill = T::MinSolutionScoreBump::get(); - /// The maximum number of nominators rewarded for each validator. /// /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim /// their reward. This used to limit the i/o cost for the nominator payout. const MaxNominatorRewardedPerValidator: u32 = T::MaxNominatorRewardedPerValidator::get(); + /// Maximum number of nominations per nominator. + const MaxNominations: u32 = T::MAX_NOMINATIONS; + type Error = Error; fn deposit_event() = default; - fn on_runtime_upgrade() -> frame_support::weights::Weight { - if StorageVersion::get() == Releases::V4_0_0 { - StorageVersion::put(Releases::V5_0_0); - migrations::migrate_to_blockable::() + fn on_runtime_upgrade() -> Weight { + if StorageVersion::get() == Releases::V5_0_0 { + migrations::v6::migrate::() } else { - 0 + T::DbWeight::get().reads(1) } } - /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the - /// election window has opened, if we are at the last session and less blocks than - /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain - /// worker, if applicable, will execute at the end of the current block, and solutions may - /// be submitted. - fn on_initialize(now: T::BlockNumber) -> Weight { - let mut consumed_weight = 0; - let mut add_weight = |reads, writes, weight| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - consumed_weight += weight; - }; - - if - // if we don't have any ongoing offchain compute. 
- Self::era_election_status().is_closed() && - // either current session final based on the plan, or we're forcing. - (Self::is_current_session_final() || Self::will_era_be_forced()) - { - let (maybe_next_session_change, estimate_next_new_session_weight) = - T::NextNewSession::estimate_next_new_session(now); - - if let Some(next_session_change) = maybe_next_session_change { - if let Some(remaining) = next_session_change.checked_sub(&now) { - if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() { - // create snapshot. - let (did_snapshot, snapshot_weight) = Self::create_stakers_snapshot(); - add_weight(0, 0, snapshot_weight); - if did_snapshot { - // Set the flag to make sure we don't waste any compute here in the same era - // after we have triggered the offline compute. - >::put( - ElectionStatus::::Open(now) - ); - add_weight(0, 1, 0); - log!(info, "Election window is Open({:?}). Snapshot created", now); - } else { - log!(warn, "Failed to create snapshot at {:?}.", now); - } - } - } - } else { - log!(warn, "Estimating next session change failed."); - } - add_weight(0, 0, estimate_next_new_session_weight) - } - // For `era_election_status`, `is_current_session_final`, `will_era_be_forced` - add_weight(3, 0, 0); - // Additional read from `on_finalize` - add_weight(1, 0, 0); - consumed_weight - } - - /// Check if the current block number is the one at which the election window has been set - /// to open. If so, it runs the offchain worker code. 
- fn offchain_worker(now: T::BlockNumber) { - use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; - if Self::era_election_status().is_open_at(now) { - let offchain_status = set_check_offchain_execution_status::(now); - if let Err(why) = offchain_status { - log!(warn, "skipping offchain worker in open election window due to [{}]", why); - } else { - if let Err(e) = compute_offchain_election::() { - log!(error, "Error in election offchain worker: {:?}", e); - } else { - log!(debug, "Executed offchain worker thread without errors."); - } - } - } + fn on_initialize(_now: T::BlockNumber) -> Weight { + // just return the weight of the on_finalize. + T::DbWeight::get().reads(1) } fn on_finalize() { @@ -1439,25 +1217,6 @@ decl_module! { T::BondingDuration::get(), ) ); - - use sp_runtime::UpperOf; - // see the documentation of `Assignment::try_normalize`. Now we can ensure that this - // will always return `Ok`. - // 1. Maximum sum of Vec must fit into `UpperOf`. - assert!( - >>::try_into(MAX_NOMINATIONS) - .unwrap() - .checked_mul(::one().deconstruct().try_into().unwrap()) - .is_some() - ); - - // 2. Maximum sum of Vec must fit into `UpperOf`. - assert!( - >>::try_into(MAX_NOMINATIONS) - .unwrap() - .checked_mul(::one().deconstruct().try_into().unwrap()) - .is_some() - ); } /// Take the origin account as a stash and lock up `value` of its balance. `controller` will @@ -1505,7 +1264,7 @@ decl_module! { Err(Error::::InsufficientValue)? } - system::Module::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; + system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; // You're auto-bonded forever, here. We might improve this by only bonding when // you actually validate/nominate and remove once you unbond __everything__. @@ -1552,7 +1311,6 @@ decl_module! 
{ /// # #[weight = T::WeightInfo::bond_extra()] fn bond_extra(origin, #[compact] max_additional: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let stash = ensure_signed(origin)?; let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1605,7 +1363,6 @@ decl_module! { /// #[weight = T::WeightInfo::unbond()] fn unbond(origin, #[compact] value: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( @@ -1665,7 +1422,6 @@ decl_module! { /// # #[weight = T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)] fn withdraw_unbonded(origin, num_slashing_spans: u32) -> DispatchResultWithPostInfo { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); @@ -1720,7 +1476,6 @@ decl_module! { /// # #[weight = T::WeightInfo::validate()] pub fn validate(origin, prefs: ValidatorPrefs) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1749,12 +1504,11 @@ decl_module! 
{ /// # #[weight = T::WeightInfo::nominate(targets.len() as u32)] pub fn nominate(origin, targets: Vec<::Source>) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; ensure!(!targets.is_empty(), Error::::EmptyTargets); - ensure!(targets.len() <= MAX_NOMINATIONS, Error::::TooManyTargets); + ensure!(targets.len() <= T::MAX_NOMINATIONS as usize, Error::::TooManyTargets); let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets); @@ -1797,7 +1551,6 @@ decl_module! { /// # #[weight = T::WeightInfo::chill()] fn chill(origin) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; Self::chill_stash(&ledger.stash); @@ -2040,8 +1793,7 @@ decl_module! { /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # #[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())] - fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { ensure_signed(origin)?; Self::do_payout_stakers(validator_stash, era) } @@ -2062,7 +1814,6 @@ decl_module! { /// # #[weight = T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32)] fn rebond(origin, #[compact] value: BalanceOf) -> DispatchResultWithPostInfo { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); @@ -2141,121 +1892,6 @@ decl_module! 
{ T::Currency::remove_lock(STAKING_ID, &stash); } - /// Submit an election result to the chain. If the solution: - /// - /// 1. is valid. - /// 2. has a better score than a potentially existing solution on chain. - /// - /// then, it will be _put_ on chain. - /// - /// A solution consists of two pieces of data: - /// - /// 1. `winners`: a flat vector of all the winners of the round. - /// 2. `assignments`: the compact version of an assignment vector that encodes the edge - /// weights. - /// - /// Both of which may be computed using _phragmen_, or any other algorithm. - /// - /// Additionally, the submitter must provide: - /// - /// - The `score` that they claim their solution has. - /// - /// Both validators and nominators will be represented by indices in the solution. The - /// indices should respect the corresponding types ([`ValidatorIndex`] and - /// [`NominatorIndex`]). Moreover, they should be valid when used to index into - /// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the - /// solution to be rejected. These two storage items are set during the election window and - /// may be used to determine the indices. - /// - /// A solution is valid if: - /// - /// 0. It is submitted when [`EraElectionStatus`] is `Open`. - /// 1. Its claimed score is equal to the score computed on-chain. - /// 2. Presents the correct number of winners. - /// 3. All indexes must be value according to the snapshot vectors. All edge values must - /// also be correct and should not overflow the granularity of the ratio type (i.e. 256 - /// or billion). - /// 4. For each edge, all targets are actually nominated by the voter. - /// 5. Has correct self-votes. - /// - /// A solutions score is consisted of 3 parameters: - /// - /// 1. `min { support.total }` for each support of a winner. This value should be maximized. - /// 2. `sum { support.total }` for each support of a winner. This value should be minimized. - /// 3. 
`sum { support.total^2 }` for each support of a winner. This value should be - /// minimized (to ensure less variance) - /// - /// # - /// The transaction is assumed to be the longest path, a better solution. - /// - Initial solution is almost the same. - /// - Worse solution is retraced in pre-dispatch-checks which sets its own weight. - /// # - #[weight = T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.voter_count() as u32, - winners.len() as u32, - )] - pub fn submit_election_solution( - origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - era: EraIndex, - size: ElectionSize, - ) -> DispatchResultWithPostInfo { - let _who = ensure_signed(origin)?; - Self::check_and_replace_solution( - winners, - compact, - ElectionCompute::Signed, - score, - era, - size, - ) - } - - /// Unsigned version of `submit_election_solution`. - /// - /// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions - /// from the local node to be included. In other words, only the block author can include a - /// transaction in the block. - /// - /// # - /// See [`submit_election_solution`]. - /// # - #[weight = T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.voter_count() as u32, - winners.len() as u32, - )] - pub fn submit_election_solution_unsigned( - origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - era: EraIndex, - size: ElectionSize, - ) -> DispatchResultWithPostInfo { - ensure_none(origin)?; - let adjustments = Self::check_and_replace_solution( - winners, - compact, - ElectionCompute::Unsigned, - score, - era, - size, - ).expect( - "An unsigned solution can only be submitted by validators; A validator should \ - always produce correct solutions, else this block should not be imported, thus \ - effectively depriving the validators from their authoring reward. Hence, this panic - is expected." 
- ); - - Ok(adjustments) - } - /// Remove the given nominations from the calling validator. /// /// Effects will be felt at the beginning of the next era. @@ -2272,7 +1908,6 @@ decl_module! { #[weight = T::WeightInfo::kick(who.len() as u32)] pub fn kick(origin, who: Vec<::Source>) -> DispatchResult { let controller = ensure_signed(origin)?; - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -2322,70 +1957,35 @@ impl Module { }) } - /// Dump the list of validators and nominators into vectors and keep them on-chain. - /// - /// This data is used to efficiently evaluate election results. returns `true` if the operation - /// is successful. - pub fn create_stakers_snapshot() -> (bool, Weight) { - let mut consumed_weight = 0; - let mut add_db_reads_writes = |reads, writes| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - }; - let validators = >::iter().map(|(v, _)| v).collect::>(); - let mut nominators = >::iter().map(|(n, _)| n).collect::>(); - - let num_validators = validators.len(); - let num_nominators = nominators.len(); - add_db_reads_writes((num_validators + num_nominators) as Weight, 0); - - if - num_validators > MAX_VALIDATORS || - num_nominators.saturating_add(num_validators) > MAX_NOMINATORS - { - log!( - warn, - "Snapshot size too big [{} <> {}][{} <> {}].", - num_validators, - MAX_VALIDATORS, - num_nominators, - MAX_NOMINATORS, - ); - (false, consumed_weight) - } else { - // all validators nominate themselves; - nominators.extend(validators.clone()); - - >::put(validators); - >::put(nominators); - add_db_reads_writes(0, 2); - (true, consumed_weight) - } - } - - /// Clears both snapshots of stakers. 
- fn kill_stakers_snapshot() { - >::kill(); - >::kill(); - } - - fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { // Validate input data - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or( + Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); + ensure!( + era <= current_era && era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); // Note: if era has no reward to be claimed, era may be future. better not to update // `ledger.claimed_rewards` in this case. let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; + .ok_or_else(|| + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; + + let controller = Self::bonded(&validator_stash).ok_or( + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, + Ok(_) => Err( + Error::::AlreadyClaimed.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?, Err(pos) => ledger.claimed_rewards.insert(pos, era), } @@ -2409,11 +2009,13 @@ impl Module { .unwrap_or_else(|| Zero::zero()); // Nothing to do if they have no reward points. 
- if validator_reward_points.is_zero() { return Ok(())} + if validator_reward_points.is_zero() { + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) + } // This is the fraction of the total reward that the validator and the // nominators will get. - let validator_total_reward_part = Perbill::from_rational_approximation( + let validator_total_reward_part = Perbill::from_rational( validator_reward_points, total_reward_points, ); @@ -2428,7 +2030,7 @@ impl Module { let validator_leftover_payout = validator_total_payout - validator_commission_payout; // Now let's calculate how this is split to the validator. - let validator_exposure_part = Perbill::from_rational_approximation( + let validator_exposure_part = Perbill::from_rational( exposure.own, exposure.total, ); @@ -2442,10 +2044,14 @@ impl Module { Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); } + // Track the number of payout ops to nominators. Note: `WeightInfo::payout_stakers_alive_staked` + // always assumes at least a validator is paid out, so we do not need to count their payout op. + let mut nominator_payout_count: u32 = 0; + // Lets now calculate how this is split to the nominators. // Reward only the clipped exposures. Note this is not necessarily sorted. for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational_approximation( + let nominator_exposure_part = Perbill::from_rational( nominator.value, exposure.total, ); @@ -2453,11 +2059,14 @@ impl Module { let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; // We can now make nominator payout: if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + // Note: this logic does not count payouts for `RewardDestination::None`. 
+ nominator_payout_count += 1; Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); } } - Ok(()) + debug_assert!(nominator_payout_count <= T::MaxNominatorRewardedPerValidator::get()); + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) } /// Update the ledger for a controller. @@ -2524,18 +2133,15 @@ impl Module { .unwrap_or(0); // Must never happen. match ForceEra::get() { + // Will set to default again, which is `NotForcing`. Forcing::ForceNew => ForceEra::kill(), + // Short circuit to `new_era`. Forcing::ForceAlways => (), + // Only go to `new_era` if deadline reached. Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), _ => { - // Either `ForceNone`, or `NotForcing && era_length < T::SessionsPerEra::get()`. - if era_length + 1 == T::SessionsPerEra::get() { - IsCurrentSessionFinal::put(true); - } else if era_length >= T::SessionsPerEra::get() { - // Should only happen when we are ready to trigger an era but we have ForceNone, - // otherwise previous arm would short circuit. - Self::close_election_window(); - } + // either `Forcing::ForceNone`, + // or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`. return None }, } @@ -2544,219 +2150,11 @@ impl Module { Self::new_era(session_index) } else { // Set initial era + log!(debug, "Starting the first era."); Self::new_era(session_index) } } - /// Basic and cheap checks that we perform in validate unsigned, and in the execution. - /// - /// State reads: ElectionState, CurrentEr, QueuedScore. - /// - /// This function does weight refund in case of errors, which is based upon the fact that it is - /// called at the very beginning of the call site's function. 
- pub fn pre_dispatch_checks(score: ElectionScore, era: EraIndex) -> DispatchResultWithPostInfo { - // discard solutions that are not in-time - // check window open - ensure!( - Self::era_election_status().is_open(), - Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(1)), - ); - - // check current era. - if let Some(current_era) = Self::current_era() { - ensure!( - current_era == era, - Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(2)), - ) - } - - // assume the given score is valid. Is it better than what we have on-chain, if we have any? - if let Some(queued_score) = Self::queued_score() { - ensure!( - is_score_better(score, queued_score, T::MinSolutionScoreBump::get()), - Error::::OffchainElectionWeakSubmission.with_weight(T::DbWeight::get().reads(3)), - ) - } - - Ok(None.into()) - } - - /// Checks a given solution and if correct and improved, writes it on chain as the queued result - /// of the next round. This may be called by both a signed and an unsigned transaction. - pub fn check_and_replace_solution( - winners: Vec, - compact_assignments: CompactAssignments, - compute: ElectionCompute, - claimed_score: ElectionScore, - era: EraIndex, - election_size: ElectionSize, - ) -> DispatchResultWithPostInfo { - // Do the basic checks. era, claimed score and window open. - let _ = Self::pre_dispatch_checks(claimed_score, era)?; - - // before we read any further state, we check that the unique targets in compact is same as - // compact. is a all in-memory check and easy to do. Moreover, it ensures that the solution - // is not full of bogus edges that can cause lots of reads to SlashingSpans. Thus, we can - // assume that the storage access of this function is always O(|winners|), not - // O(|compact.edge_count()|). - ensure!( - compact_assignments.unique_targets().len() == winners.len(), - Error::::OffchainElectionBogusWinnerCount, - ); - - // Check that the number of presented winners is sane. 
Most often we have more candidates - // than we need. Then it should be `Self::validator_count()`. Else it should be all the - // candidates. - let snapshot_validators_length = >::decode_len() - .map(|l| l as u32) - .ok_or_else(|| Error::::SnapshotUnavailable)?; - - // size of the solution must be correct. - ensure!( - snapshot_validators_length == u32::from(election_size.validators), - Error::::OffchainElectionBogusElectionSize, - ); - - // check the winner length only here and when we know the length of the snapshot validators - // length. - let desired_winners = Self::validator_count().min(snapshot_validators_length); - ensure!(winners.len() as u32 == desired_winners, Error::::OffchainElectionBogusWinnerCount); - - let snapshot_nominators_len = >::decode_len() - .map(|l| l as u32) - .ok_or_else(|| Error::::SnapshotUnavailable)?; - - // rest of the size of the solution must be correct. - ensure!( - snapshot_nominators_len == election_size.nominators, - Error::::OffchainElectionBogusElectionSize, - ); - - // decode snapshot validators. - let snapshot_validators = Self::snapshot_validators() - .ok_or(Error::::SnapshotUnavailable)?; - - // check if all winners were legit; this is rather cheap. Replace with accountId. - let winners = winners.into_iter().map(|widx| { - // NOTE: at the moment, since staking is explicitly blocking any offence until election - // is closed, we don't check here if the account id at `snapshot_validators[widx]` is - // actually a validator. If this ever changes, this loop needs to also check this. - snapshot_validators.get(widx as usize).cloned().ok_or(Error::::OffchainElectionBogusWinner) - }).collect::, Error>>()?; - - // decode the rest of the snapshot. 
- let snapshot_nominators = Self::snapshot_nominators() - .ok_or(Error::::SnapshotUnavailable)?; - - // helpers - let nominator_at = |i: NominatorIndex| -> Option { - snapshot_nominators.get(i as usize).cloned() - }; - let validator_at = |i: ValidatorIndex| -> Option { - snapshot_validators.get(i as usize).cloned() - }; - - // un-compact. - let assignments = compact_assignments.into_assignment( - nominator_at, - validator_at, - ).map_err(|e| { - // log the error since it is not propagated into the runtime error. - log!(warn, "un-compacting solution failed due to {:?}", e); - Error::::OffchainElectionBogusCompact - })?; - - // check all nominators actually including the claimed vote. Also check correct self votes. - // Note that we assume all validators and nominators in `assignments` are properly bonded, - // because they are coming from the snapshot via a given index. - for Assignment { who, distribution } in assignments.iter() { - let is_validator = >::contains_key(&who); - let maybe_nomination = Self::nominators(&who); - - if !(maybe_nomination.is_some() ^ is_validator) { - // all of the indices must map to either a validator or a nominator. If this is ever - // not the case, then the locking system of staking is most likely faulty, or we - // have bigger problems. - log!(error, "detected an error in the staking locking and snapshot."); - // abort. - return Err(Error::::OffchainElectionBogusNominator.into()); - } - - if !is_validator { - // a normal vote - let nomination = maybe_nomination.expect( - "exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \ - is_validator is false; maybe_nomination is some; qed" - ); - - // NOTE: we don't really have to check here if the sum of all edges are the - // nominator correct. Un-compacting assures this by definition. - - for (t, _) in distribution { - // each target in the provided distribution must be actually nominated by the - // nominator after the last non-zero slash. 
- if nomination.targets.iter().find(|&tt| tt == t).is_none() { - return Err(Error::::OffchainElectionBogusNomination.into()); - } - - if ::SlashingSpans::get(&t).map_or( - false, - |spans| nomination.submitted_in < spans.last_nonzero_slash(), - ) { - return Err(Error::::OffchainElectionSlashedNomination.into()); - } - } - } else { - // a self vote - ensure!(distribution.len() == 1, Error::::OffchainElectionBogusSelfVote); - ensure!(distribution[0].0 == *who, Error::::OffchainElectionBogusSelfVote); - // defensive only. A compact assignment of length one does NOT encode the weight and - // it is always created to be 100%. - ensure!( - distribution[0].1 == OffchainAccuracy::one(), - Error::::OffchainElectionBogusSelfVote, - ); - } - } - - // convert into staked assignments. - let staked_assignments = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_fn(), - ); - - // build the support map thereof in order to evaluate. - let supports = to_supports(&winners, &staked_assignments) - .map_err(|_| Error::::OffchainElectionBogusEdge)?; - - // Check if the score is the same as the claimed one. - let submitted_score = (&supports).evaluate(); - ensure!(submitted_score == claimed_score, Error::::OffchainElectionBogusScore); - - // At last, alles Ok. Exposures and store the result. - let exposures = Self::collect_exposures(supports); - log!( - info, - "A better solution (with compute {:?} and score {:?}) has been validated and stored \ - on chain.", - compute, - submitted_score, - ); - - // write new results. - >::put(ElectionResult { - elected_stashes: winners, - exposures, - compute, - }); - QueuedScore::put(submitted_score); - - // emit event. - Self::deposit_event(RawEvent::SolutionStored(compute)); - - Ok(None.into()) - } - /// Start a session potentially starting an era. 
fn start_session(start_session: SessionIndex) { let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); @@ -2837,15 +2235,10 @@ impl Module { if let Some(active_era_start) = active_era.start { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - let era_duration = now_as_millis_u64 - active_era_start; - let (validator_payout, max_payout) = inflation::compute_total_payout( - &T::RewardCurve::get(), - Self::eras_total_stake(&active_era.index), - T::Currency::total_issuance(), - // Duration of era; more than u64::MAX is rewarded as u64::MAX. - era_duration.saturated_into::(), - ); - let rest = max_payout.saturating_sub(validator_payout); + let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); + let staked = Self::eras_total_stake(&active_era.index); + let issuance = T::Currency::total_issuance(); + let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); Self::deposit_event(RawEvent::EraPayout(active_era.index, validator_payout, rest)); @@ -2870,249 +2263,27 @@ impl Module { } // Set staking information for new era. - let maybe_new_validators = Self::select_and_update_validators(current_era); - // TWO_PHASE_NOTE: use this later on. - let _unused_new_validators = Self::enact_election(current_era); + let maybe_new_validators = Self::enact_election(current_era); maybe_new_validators } - /// Remove all the storage items associated with the election. - fn close_election_window() { - // Close window. - >::put(ElectionStatus::Closed); - // Kill snapshots. - Self::kill_stakers_snapshot(); - // Don't track final session. - IsCurrentSessionFinal::put(false); - } - - /// Select the new validator set at the end of the era. - /// - /// Runs [`try_do_phragmen`] and updates the following storage items: - /// - [`EraElectionStatus`]: with `None`. - /// - [`ErasStakers`]: with the new staker set. - /// - [`ErasStakersClipped`]. - /// - [`ErasValidatorPrefs`]. 
- /// - [`ErasTotalStake`]: with the new total stake. - /// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed. - /// - /// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed. - /// - /// If the election has been successful, It passes the new set upwards. - /// - /// This should only be called at the end of an era. - fn select_and_update_validators(current_era: EraIndex) -> Option> { - if let Some(ElectionResult::> { - elected_stashes, - exposures, - compute, - }) = Self::try_do_election() { - // Totally close the election round and data. - Self::close_election_window(); - - // Populate Stakers and write slot stake. - let mut total_stake: BalanceOf = Zero::zero(); - exposures.into_iter().for_each(|(stash, exposure)| { - total_stake = total_stake.saturating_add(exposure.total); - >::insert(current_era, &stash, &exposure); - - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(¤t_era, &stash, exposure_clipped); - }); - - // Insert current era staking information - >::insert(¤t_era, total_stake); - - // collect the pref of all winners - for stash in &elected_stashes { - let pref = Self::validators(stash); - >::insert(¤t_era, stash, pref); - } - - // emit event - Self::deposit_event(RawEvent::StakingElection(compute)); - - log!( - info, - "new validator set of size {:?} has been elected via {:?} for staring era {:?}", - elected_stashes.len(), - compute, - current_era, - ); - - Some(elected_stashes) - } else { - None - } - } - - /// Select a new validator set from the assembled stakers and their role preferences. It tries - /// first to peek into [`QueuedElected`]. Otherwise, it runs a new on-chain phragmen election. 
- /// - /// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage - /// is updated. - fn try_do_election() -> Option>> { - // an election result from either a stored submission or locally executed one. - let next_result = >::take().or_else(|| - Self::do_on_chain_phragmen() - ); - - // either way, kill this. We remove it here to make sure it always has the exact same - // lifetime as `QueuedElected`. - QueuedScore::kill(); - - next_result - } - - /// Execute election and return the new results. The edge weights are processed into support - /// values. - /// - /// This is basically a wrapper around [`Self::do_phragmen`] which translates - /// `PrimitiveElectionResult` into `ElectionResult`. + /// Enact and process the election using the `ElectionProvider` type. /// - /// No storage item is updated. - pub fn do_on_chain_phragmen() -> Option>> { - if let Some(phragmen_result) = Self::do_phragmen::(0) { - let elected_stashes = phragmen_result.winners.iter() - .map(|(s, _)| s.clone()) - .collect::>(); - let assignments = phragmen_result.assignments; - - let staked_assignments = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_fn(), - ); - - let supports = to_supports( - &elected_stashes, - &staked_assignments, - ) - .map_err(|_| - log!( - error, - "on-chain phragmen is failing due to a problem in the result. This must be a bug." - ) - ) - .ok()?; - - // collect exposures - let exposures = Self::collect_exposures(supports); - - // In order to keep the property required by `on_session_ending` that we must return the - // new validator set even if it's the same as the old, as long as any underlying - // economic conditions have changed, we don't attempt to do any optimization where we - // compare against the prior set. - Some(ElectionResult::> { - elected_stashes, - exposures, - compute: ElectionCompute::OnChain, + /// This will also process the election, as noted in [`process_election`]. 
+ fn enact_election(current_era: EraIndex) -> Option> { + T::ElectionProvider::elect() + .map_err(|e| { + log!(warn, "election provider failed due to {:?}", e) + }) + .and_then(|(res, weight)| { + >::register_extra_weight_unchecked( + weight, + frame_support::weights::DispatchClass::Mandatory, + ); + Self::process_election(res, current_era) }) - } else { - // There were not enough candidates for even our minimal level of functionality. This is - // bad. We should probably disable all functionality except for block production and let - // the chain keep producing blocks until we can decide on a sufficiently substantial - // set. TODO: #2494 - None - } - } - - /// Execute phragmen election and return the new results. No post-processing is applied and the - /// raw edge weights are returned. - /// - /// Self votes are added and nominations before the most recent slashing span are ignored. - /// - /// No storage item is updated. - pub fn do_phragmen( - iterations: usize, - ) -> Option> { - let weight_of = Self::slashable_balance_of_fn(); - let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec)> = Vec::new(); - let mut all_validators = Vec::new(); - for (validator, _) in >::iter() { - // append self vote - let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); - all_nominators.push(self_vote); - all_validators.push(validator); - } - - let nominator_votes = >::iter().map(|(nominator, nominations)| { - let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; - - // Filter out nomination targets which were nominated before the most recent - // slashing span. 
- targets.retain(|stash| { - ::SlashingSpans::get(&stash).map_or( - true, - |spans| submitted_in >= spans.last_nonzero_slash(), - ) - }); - - (nominator, targets) - }); - all_nominators.extend(nominator_votes.map(|(n, ns)| { - let s = weight_of(&n); - (n, s, ns) - })); - - if all_validators.len() < Self::minimum_validator_count().max(1) as usize { - // If we don't have enough candidates, nothing to do. - log!( - warn, - "chain does not have enough staking candidates to operate. Era {:?}.", - Self::current_era() - ); - None - } else { - seq_phragmen::<_, Accuracy>( - Self::validator_count() as usize, - all_validators, - all_nominators, - Some((iterations, 0)), // exactly run `iterations` rounds. - ) - .map_err(|err| log!(error, "Call to seq-phragmen failed due to {:?}", err)) .ok() - } - } - - /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a - /// [`Exposure`]. - fn collect_exposures( - supports: Supports, - ) -> Vec<(T::AccountId, Exposure>)> { - let total_issuance = T::Currency::total_issuance(); - let to_currency = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); - - supports.into_iter().map(|(validator, support)| { - // build `struct exposure` from `support` - let mut others = Vec::with_capacity(support.voters.len()); - let mut own: BalanceOf = Zero::zero(); - let mut total: BalanceOf = Zero::zero(); - support.voters - .into_iter() - .map(|(nominator, weight)| (nominator, to_currency(weight))) - .for_each(|(nominator, stake)| { - if nominator == validator { - own = own.saturating_add(stake); - } else { - others.push(IndividualExposure { who: nominator, value: stake }); - } - total = total.saturating_add(stake); - }); - - let exposure = Exposure { - own, - others, - total, - }; - - (validator, exposure) - }).collect::)>>() } /// Process the output of the election. 
@@ -3122,25 +2293,28 @@ impl Module { /// /// Returns `Err(())` if less than [`MinimumValidatorCount`] validators have been elected, `Ok` /// otherwise. - // TWO_PHASE_NOTE: remove the dead code. - #[allow(dead_code)] pub fn process_election( - flat_supports: sp_npos_elections::Supports, + flat_supports: frame_election_provider_support::Supports, current_era: EraIndex, ) -> Result, ()> { let exposures = Self::collect_exposures(flat_supports); let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); - if (elected_stashes.len() as u32) <= Self::minimum_validator_count() { - log!( - warn, - "chain does not have enough staking candidates to operate for era {:?}", - current_era, - ); + if (elected_stashes.len() as u32) < Self::minimum_validator_count().max(1) { + // Session will panic if we ever return an empty validator set, thus max(1) ^^. + if current_era > 0 { + log!( + warn, + "chain does not have enough staking candidates to operate for era {:?} ({} elected, minimum is {})", + current_era, + elected_stashes.len(), + Self::minimum_validator_count(), + ); + } return Err(()); } - // Populate Stakers and write slot stake. + // Populate stakers, exposures, and the snapshot of validator prefs. let mut total_stake: BalanceOf = Zero::zero(); exposures.into_iter().for_each(|(stash, exposure)| { total_stake = total_stake.saturating_add(exposure.total); @@ -3165,28 +2339,54 @@ impl Module { } // emit event - // TWO_PHASE_NOTE: remove the inner value. - Self::deposit_event(RawEvent::StakingElection(ElectionCompute::Signed)); - - log!( - info, - "new validator set of size {:?} has been processed for era {:?}", - elected_stashes.len(), - current_era, - ); + Self::deposit_event(RawEvent::StakingElection); + + if current_era > 0 { + log!( + info, + "new validator set of size {:?} has been processed for era {:?}", + elected_stashes.len(), + current_era, + ); + } Ok(elected_stashes) } - /// Enact and process the election using the `ElectionProvider` type. 
- /// - /// This will also process the election, as noted in [`process_election`]. - fn enact_election(_current_era: EraIndex) -> Option> { - let _outcome = T::ElectionProvider::elect().map(|_| ()); - log!(debug, "Experimental election provider outputted {:?}", _outcome); - // TWO_PHASE_NOTE: This code path shall not return anything for now. Later on, redirect the - // results to `process_election`. - None + /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a + /// [`Exposure`]. + fn collect_exposures( + supports: Supports, + ) -> Vec<(T::AccountId, Exposure>)> { + let total_issuance = T::Currency::total_issuance(); + let to_currency = |e: frame_election_provider_support::ExtendedBalance| { + T::CurrencyToVote::to_currency(e, total_issuance) + }; + + supports + .into_iter() + .map(|(validator, support)| { + // build `struct exposure` from `support` + let mut others = Vec::with_capacity(support.voters.len()); + let mut own: BalanceOf = Zero::zero(); + let mut total: BalanceOf = Zero::zero(); + support + .voters + .into_iter() + .map(|(nominator, weight)| (nominator, to_currency(weight))) + .for_each(|(nominator, stake)| { + if nominator == validator { + own = own.saturating_add(stake); + } else { + others.push(IndividualExposure { who: nominator, value: stake }); + } + total = total.saturating_add(stake); + }); + + let exposure = Exposure { own, others, total }; + (validator, exposure) + }) + .collect::)>>() } /// Remove all associated data of a stash account from the staking system. @@ -3208,7 +2408,7 @@ impl Module { >::remove(stash); >::remove(stash); - system::Module::::dec_consumers(stash); + system::Pallet::::dec_consumers(stash); Ok(()) } @@ -3251,7 +2451,6 @@ impl Module { /// relatively to their points. /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. - /// If you need to reward lots of validator consider using `reward_by_indices`. 
pub fn reward_by_ids( validators_points: impl IntoIterator ) { @@ -3273,13 +2472,6 @@ impl Module { } } - fn will_era_be_forced() -> bool { - match ForceEra::get() { - Forcing::ForceAlways | Forcing::ForceNew => true, - Forcing::ForceNone | Forcing::NotForcing => false, - } - } - #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, @@ -3289,11 +2481,6 @@ impl Module { >::insert(¤t_era, &controller, &exposure); } - #[cfg(feature = "runtime-benchmarks")] - pub fn put_election_status(status: ElectionStatus::) { - >::put(status); - } - #[cfg(feature = "runtime-benchmarks")] pub fn set_slash_reward_fraction(fraction: Perbill) { SlashRewardFraction::put(fraction); @@ -3319,13 +2506,17 @@ impl Module { all_voters.push(self_vote); } + // collect all slashing spans into a BTreeMap for further queries. + let slashing_spans = >::iter().collect::>(); + for (nominator, nominations) in >::iter() { let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; // Filter out nomination targets which were nominated before the most recent // slashing span. targets.retain(|stash| { - Self::slashing_spans(&stash) + slashing_spans + .get(stash) .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) }); @@ -3341,19 +2532,46 @@ impl Module { } } -impl sp_election_providers::ElectionDataProvider +impl frame_election_provider_support::ElectionDataProvider for Module { - fn desired_targets() -> u32 { - Self::validator_count() + const MAXIMUM_VOTES_PER_VOTER: u32 = T::MAX_NOMINATIONS; + fn desired_targets() -> data_provider::Result<(u32, Weight)> { + Ok((Self::validator_count(), ::DbWeight::get().reads(1))) + } + + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result<(Vec<(T::AccountId, VoteWeight, Vec)>, Weight)> { + // NOTE: reading these counts already needs to iterate a lot of storage keys, but they get + // cached. 
This is okay for the case of `Ok(_)`, but bad for `Err(_)`, as the trait does not + // report weight in failures. + let nominator_count = >::iter().count(); + let validator_count = >::iter().count(); + let voter_count = nominator_count.saturating_add(validator_count); + + if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { + return Err("Voter snapshot too big"); + } + + let slashing_span_count = >::iter().count(); + let weight = T::WeightInfo::get_npos_voters( + nominator_count as u32, + validator_count as u32, + slashing_span_count as u32, + ); + Ok((Self::get_npos_voters(), weight)) } - fn voters() -> Vec<(T::AccountId, VoteWeight, Vec)> { - Self::get_npos_voters() - } + fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { + let target_count = >::iter().count(); + + if maybe_max_len.map_or(false, |max_len| target_count > max_len) { + return Err("Target snapshot too big"); + } - fn targets() -> Vec { - Self::get_npos_targets() + let weight = ::DbWeight::get().reads(target_count as u64); + Ok((Self::get_npos_targets(), weight)) } fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { @@ -3387,15 +2605,45 @@ impl sp_election_providers::ElectionDataProvider)>, targets: Vec, + target_stake: Option, ) { + use sp_std::convert::TryFrom; targets.into_iter().for_each(|v| { + let stake: BalanceOf = target_stake + .and_then(|w| >::try_from(w).ok()) + .unwrap_or(T::Currency::minimum_balance() * 100u32.into()); + >::insert(v.clone(), v.clone()); + >::insert( + v.clone(), + StakingLedger { + stash: v.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); >::insert( v, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, ); }); - voters.into_iter().for_each(|(v, _s, t)| { + voters.into_iter().for_each(|(v, s, t)| { + let stake = >::try_from(s).unwrap_or_else(|_| { + panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") + }); + 
>::insert(v.clone(), v.clone()); + >::insert( + v.clone(), + StakingLedger { + stash: v.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); >::insert( v, Nominations { targets: t, submitted_in: 0, suppressed: false }, @@ -3411,31 +2659,16 @@ impl sp_election_providers::ElectionDataProvider pallet_session::SessionManager for Module { fn new_session(new_index: SessionIndex) -> Option> { - log!( - trace, - "[{:?}] planning new_session({})", - >::block_number(), - new_index, - ); + log!(trace, "planning new_session({})", new_index); CurrentPlannedSession::put(new_index); Self::new_session(new_index) } fn start_session(start_index: SessionIndex) { - log!( - trace, - "[{:?}] starting start_session({})", - >::block_number(), - start_index, - ); + log!(trace, "starting start_session({})", start_index); Self::start_session(start_index) } fn end_session(end_index: SessionIndex) { - log!( - trace, - "[{:?}] ending end_session({})", - >::block_number(), - end_index, - ); + log!(trace, "ending end_session({})", end_index); Self::end_session(end_index) } } @@ -3650,8 +2883,8 @@ where } fn can_report() -> bool { - // TWO_PHASE_NOTE: we can get rid of this API - Self::era_election_status().is_closed() + // TODO: https://github.com/paritytech/substrate/issues/8343 + true } } @@ -3687,100 +2920,7 @@ where } } -#[allow(deprecated)] -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::submit_election_solution_unsigned( - _, - _, - score, - era, - _, - ) = call { - use offchain_election::DEFAULT_LONGEVITY; - - // discard solution not coming from the local OCW. 
- match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } - _ => { - log!(debug, "rejecting unsigned transaction because it is not local/in-block."); - return InvalidTransaction::Call.into(); - } - } - - if let Err(error_with_post_info) = Self::pre_dispatch_checks(*score, *era) { - let invalid = to_invalid(error_with_post_info); - log!( - debug, - "💸 validate unsigned pre dispatch checks failed due to error #{:?}.", - invalid, - ); - return invalid.into(); - } - - log!(debug, "validateUnsigned succeeded for a solution at era {}.", era); - - ValidTransaction::with_tag_prefix("StakingOffchain") - // The higher the score[0], the better a solution is. - .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into())) - // Defensive only. A single solution can exist in the pool per era. Each validator - // will run OCW at most once per era, hence there should never exist more than one - // transaction anyhow. - .and_provides(era) - // Note: this can be more accurate in the future. We do something like - // `era_end_block - current_block` but that is not needed now as we eagerly run - // offchain workers now and the above should be same as `T::ElectionLookahead` - // without the need to query more storage in the validation phase. If we randomize - // offchain worker, then we might re-consider this. - .longevity(TryInto::::try_into( - T::ElectionLookahead::get()).unwrap_or(DEFAULT_LONGEVITY) - ) - // We don't propagate this. This can never the validated at a remote node. 
- .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() - } - } - - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::submit_election_solution_unsigned( - _, - _, - score, - era, - _, - ) = call { - // IMPORTANT NOTE: These checks are performed in the dispatch call itself, yet we need - // to duplicate them here to prevent a block producer from putting a previously - // validated, yet no longer valid solution on chain. - // OPTIMISATION NOTE: we could skip this in the `submit_election_solution_unsigned` - // since we already do it here. The signed version needs it though. Yer for now we keep - // this duplicate check here so both signed and unsigned can use a singular - // `check_and_replace_solution`. - Self::pre_dispatch_checks(*score, *era) - .map(|_| ()) - .map_err(to_invalid) - .map_err(Into::into) - } else { - Err(InvalidTransaction::Call.into()) - } - } -} - /// Check that list is sorted and has no duplicates. fn is_sorted_and_unique(list: &[u32]) -> bool { list.windows(2).all(|w| w[0] < w[1]) } - -/// convert a DispatchErrorWithPostInfo to a custom InvalidTransaction with the inner code being the -/// error number. 
-fn to_invalid(error_with_post_info: DispatchErrorWithPostInfo) -> InvalidTransaction { - let error = error_with_post_info.error; - let error_number = match error { - DispatchError::Module { error, ..} => error, - _ => 0, - }; - InvalidTransaction::Custom(error_number) -} diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 0d6701c48b894..1942e5eed0c6f 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -22,14 +22,11 @@ use crate as staking; use frame_support::{ assert_ok, parameter_types, traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize, OneSessionHandler}, - weights::{constants::RocksDbWeight, Weight}, + weights::constants::RocksDbWeight, IterableStorageMap, StorageDoubleMap, StorageMap, StorageValue, }; use sp_core::H256; use sp_io; -use sp_npos_elections::{ - to_supports, reduce, ExtendedBalance, StakedAssignment, ElectionScore, EvaluateSupport, -}; use sp_runtime::{ curve::PiecewiseLinear, testing::{Header, TestXt, UintAuthorityId}, @@ -37,7 +34,7 @@ use sp_runtime::{ }; use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; use std::{cell::RefCell, collections::HashSet}; -use sp_election_providers::onchain; +use frame_election_provider_support::onchain; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -98,11 +95,11 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Staking: staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: 
staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } ); @@ -126,10 +123,8 @@ parameter_types! { pub static SessionsPerEra: SessionIndex = 3; pub static ExistentialDeposit: Balance = 1; pub static SlashDeferDuration: EraIndex = 0; - pub static ElectionLookahead: BlockNumber = 0; pub static Period: BlockNumber = 5; pub static Offset: BlockNumber = 0; - pub static MaxIterations: u32 = 0; } impl frame_system::Config for Test { @@ -220,9 +215,6 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const UnsignedPriority: u64 = 1 << 20; - pub const MinSolutionScoreBump: Perbill = Perbill::zero(); - pub OffchainSolutionWeightLimit: Weight = BlockWeights::get().max_block; } thread_local! { @@ -243,10 +235,12 @@ impl OnUnbalanced> for RewardRemainderMock { impl onchain::Config for Test { type AccountId = AccountId; type BlockNumber = BlockNumber; + type BlockWeights = BlockWeights; type Accuracy = Perbill; type DataProvider = Staking; } impl Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -259,15 +253,9 @@ impl Config for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type MaxIterations = MaxIterations; - type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; - type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; type ElectionProvider = onchain::OnChainSequentialPhragmen; type 
WeightInfo = (); } @@ -281,6 +269,8 @@ where } pub type Extrinsic = TestXt; +pub(crate) type StakingCall = crate::Call; +pub(crate) type TestRuntimeCall = ::Call; pub struct ExtBuilder { validator_pool: bool, @@ -351,10 +341,6 @@ impl ExtBuilder { SESSIONS_PER_ERA.with(|v| *v.borrow_mut() = length); self } - pub fn election_lookahead(self, look: BlockNumber) -> Self { - ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = look); - self - } pub fn period(self, length: BlockNumber) -> Self { PERIOD.with(|v| *v.borrow_mut() = length); self @@ -363,13 +349,6 @@ impl ExtBuilder { self.has_stakers = has; self } - pub fn max_offchain_iterations(self, iterations: u32) -> Self { - MAX_ITERATIONS.with(|v| *v.borrow_mut() = iterations); - self - } - pub fn offchain_election_ext(self) -> Self { - self.session_per_era(4).period(5).election_lookahead(3) - } pub fn initialize_first_session(mut self, init: bool) -> Self { self.initialize_first_session = init; self @@ -669,25 +648,22 @@ pub(crate) fn start_active_era(era_index: EraIndex) { } pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { - let reward = inflation::compute_total_payout( - ::RewardCurve::get(), + let (payout, _rest) = ::EraPayout::era_payout( Staking::eras_total_stake(active_era()), Balances::total_issuance(), duration, - ) - .0; - assert!(reward > 0); - reward + ); + assert!(payout > 0); + payout } pub(crate) fn maximum_payout_for_duration(duration: u64) -> Balance { - inflation::compute_total_payout( - ::RewardCurve::get(), - 0, + let (payout, rest) = ::EraPayout::era_payout( + Staking::eras_total_stake(active_era()), Balances::total_issuance(), duration, - ) - .1 + ); + payout + rest } /// Time it takes to finish a session. @@ -776,198 +752,6 @@ pub(crate) fn add_slash(who: &AccountId) { ); } -// winners will be chosen by simply their unweighted total backing stake. Nominator stake is -// distributed evenly. 
-pub(crate) fn horrible_npos_solution( - do_reduce: bool, -) -> (CompactAssignments, Vec, ElectionScore) { - let mut backing_stake_of: BTreeMap = BTreeMap::new(); - - // self stake - >::iter().for_each(|(who, _p)| { - *backing_stake_of.entry(who).or_insert(Zero::zero()) += Staking::slashable_balance_of(&who) - }); - - // add nominator stuff - >::iter().for_each(|(who, nomination)| { - nomination.targets.iter().for_each(|v| { - *backing_stake_of.entry(*v).or_insert(Zero::zero()) += - Staking::slashable_balance_of(&who) - }) - }); - - // elect winners - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .cloned() - .take(Staking::validator_count() as usize) - .collect(); - - // create assignments - let mut staked_assignment: Vec> = Vec::new(); - >::iter().for_each(|(who, nomination)| { - let mut dist: Vec<(AccountId, ExtendedBalance)> = Vec::new(); - nomination.targets.iter().for_each(|v| { - if winners.iter().find(|w| *w == v).is_some() { - dist.push((*v, ExtendedBalance::zero())); - } - }); - - if dist.len() == 0 { - return; - } - - // assign real stakes. just split the stake. - let stake = Staking::slashable_balance_of(&who) as ExtendedBalance; - let mut sum: ExtendedBalance = Zero::zero(); - let dist_len = dist.len(); - { - dist.iter_mut().for_each(|(_, w)| { - let partial = stake / (dist_len as ExtendedBalance); - *w = partial; - sum += partial; - }); - } - - // assign the leftover to last. - { - let leftover = stake - sum; - let last = dist.last_mut().unwrap(); - last.1 += leftover; - } - - staked_assignment.push(StakedAssignment { - who, - distribution: dist, - }); - }); - - // Ensure that this result is worse than seq-phragmen. Otherwise, it should not have been used - // for testing. 
- let score = { - let (_, _, better_score) = prepare_submission_with(true, true, 0, |_| {}); - - let support = to_supports::(&winners, &staked_assignment).unwrap(); - let score = support.evaluate(); - - assert!(sp_npos_elections::is_score_better::( - better_score, - score, - MinSolutionScoreBump::get(), - )); - - score - }; - - if do_reduce { - reduce(&mut staked_assignment); - } - - let snapshot_validators = Staking::snapshot_validators().unwrap(); - let snapshot_nominators = Staking::snapshot_nominators().unwrap(); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators.iter().position(|x| x == a).map(|i| i as NominatorIndex) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators.iter().position(|x| x == a).map(|i| i as ValidatorIndex) - }; - - // convert back to ratio assignment. This takes less space. - let assignments_reduced = - sp_npos_elections::assignment_staked_to_ratio::(staked_assignment); - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .unwrap(); - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) -} - -/// Note: this should always logically reproduce [`offchain_election::prepare_submission`], yet we -/// cannot do it since we want to have `tweak` injected into the process. -/// -/// If the input is being tweaked in a way that the score cannot be compute accurately, -/// `compute_real_score` can be set to true. In this case a `Default` score is returned. -pub(crate) fn prepare_submission_with( - compute_real_score: bool, - do_reduce: bool, - iterations: usize, - tweak: impl FnOnce(&mut Vec>), -) -> (CompactAssignments, Vec, ElectionScore) { - // run election on the default stuff. 
- let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(iterations).unwrap(); - let winners = sp_npos_elections::to_without_backing(winners); - - let mut staked = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Staking::slashable_balance_of_fn(), - ); - - // apply custom tweaks. awesome for testing. - tweak(&mut staked); - - if do_reduce { - reduce(&mut staked); - } - - // convert back to ratio assignment. This takes less space. - let snapshot_validators = Staking::snapshot_validators().expect("snapshot not created."); - let snapshot_nominators = Staking::snapshot_nominators().expect("snapshot not created."); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find nominator index for {:?}", a); None }, - |i| Some(i as NominatorIndex), - ) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find validator index for {:?}", a); None }, - |i| Some(i as ValidatorIndex), - ) - }; - - let assignments_reduced = sp_npos_elections::assignment_staked_to_ratio(staked); - - // re-compute score by converting, yet again, into staked type - let score = if compute_real_score { - let staked = sp_npos_elections::assignment_ratio_to_staked( - assignments_reduced.clone(), - Staking::slashable_balance_of_fn(), - ); - - let support_map = to_supports( - winners.as_slice(), - staked.as_slice(), - ).unwrap(); - support_map.evaluate() - } else { - Default::default() - }; - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .expect("Failed to create compact"); - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) -} - /// Make all validator and nominator request their payment pub(crate) 
fn make_all_reward_payment(era: EraIndex) { let validators_with_reward = diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs deleted file mode 100644 index 8398c2022fc3f..0000000000000 --- a/frame/staking/src/offchain_election.rs +++ /dev/null @@ -1,588 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Helpers for offchain worker election. - -use crate::{ - Call, CompactAssignments, ElectionSize, Module, NominatorIndex, Nominators, OffchainAccuracy, - Config, ValidatorIndex, WeightInfo, -}; -use codec::Decode; -use frame_support::{traits::Get, weights::Weight, IterableStorageMap}; -use frame_system::offchain::SubmitTransaction; -use sp_npos_elections::{ - to_supports, EvaluateSupport, reduce, Assignment, ElectionResult, ElectionScore, - ExtendedBalance, CompactSolution, -}; -use sp_runtime::{ - offchain::storage::StorageValueRef, traits::TrailingZeroInput, RuntimeDebug, -}; -use sp_std::{convert::TryInto, prelude::*}; - -/// Error types related to the offchain election machinery. -#[derive(RuntimeDebug)] -pub enum OffchainElectionError { - /// election returned None. This means less candidate that minimum number of needed - /// validators were present. The chain is in trouble and not much that we can do about it. 
- ElectionFailed, - /// Submission to the transaction pool failed. - PoolSubmissionFailed, - /// The snapshot data is not available. - SnapshotUnavailable, - /// Error from npos-election crate. This usually relates to compact operation. - InternalElectionError(sp_npos_elections::Error), - /// One of the computed winners is invalid. - InvalidWinner, - /// A nominator is not available in the snapshot. - NominatorSnapshotCorrupt, -} - -impl From for OffchainElectionError { - fn from(e: sp_npos_elections::Error) -> Self { - Self::InternalElectionError(e) - } -} - -/// Storage key used to store the persistent offchain worker status. -pub(crate) const OFFCHAIN_HEAD_DB: &[u8] = b"parity/staking-election/"; -/// The repeat threshold of the offchain worker. This means we won't run the offchain worker twice -/// within a window of 5 blocks. -pub(crate) const OFFCHAIN_REPEAT: u32 = 5; -/// Default number of blocks for which the unsigned transaction should stay in the pool -pub(crate) const DEFAULT_LONGEVITY: u64 = 25; - -/// Checks if an execution of the offchain worker is permitted at the given block number, or not. -/// -/// This essentially makes sure that we don't run on previous blocks in case of a re-org, and we -/// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. -/// -/// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. -pub(crate) fn set_check_offchain_execution_status( - now: T::BlockNumber, -) -> Result<(), &'static str> { - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); - - let mutate_stat = - storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { - match maybe_head { - Some(Some(head)) if now < head => Err("fork."), - Some(Some(head)) if now >= head && now <= head + threshold => { - Err("recently executed.") - } - Some(Some(head)) if now > head + threshold => { - // we can run again now. Write the new head. 
- Ok(now) - } - _ => { - // value doesn't exists. Probably this node just booted up. Write, and run - Ok(now) - } - } - }); - - match mutate_stat { - // all good - Ok(Ok(_)) => Ok(()), - // failed to write. - Ok(Err(_)) => Err("failed to write to offchain db."), - // fork etc. - Err(why) => Err(why), - } -} - -/// The internal logic of the offchain worker of this module. This runs the phragmen election, -/// compacts and reduces the solution, computes the score and submits it back to the chain as an -/// unsigned transaction, without any signature. -pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { - let iters = get_balancing_iters::(); - // compute raw solution. Note that we use `OffchainAccuracy`. - let ElectionResult { - winners, - assignments, - } = >::do_phragmen::(iters) - .ok_or(OffchainElectionError::ElectionFailed)?; - - // process and prepare it for submission. - let (winners, compact, score, size) = prepare_submission::( - assignments, - winners, - true, - T::OffchainSolutionWeightLimit::get(), - )?; - - crate::log!( - info, - "prepared a seq-phragmen solution with {} balancing iterations and score {:?}", - iters, - score, - ); - - // defensive-only: current era can never be none except genesis. - let current_era = >::current_era().unwrap_or_default(); - - // send it. - let call = Call::submit_election_solution_unsigned( - winners, - compact, - score, - current_era, - size, - ).into(); - - SubmitTransaction::>::submit_unsigned_transaction(call) - .map_err(|_| OffchainElectionError::PoolSubmissionFailed) -} - -/// Get a random number of iterations to run the balancing. -/// -/// Uses the offchain seed to generate a random number. 
-pub fn get_balancing_iters() -> usize { - match T::MaxIterations::get() { - 0 => 0, - max @ _ => { - let seed = sp_io::offchain::random_seed(); - let random = ::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed") % max.saturating_add(1); - random as usize - } - } -} - -/// Find the maximum `len` that a compact can have in order to fit into the block weight. -/// -/// This only returns a value between zero and `size.nominators`. -pub fn maximum_compact_len( - winners_len: u32, - size: ElectionSize, - max_weight: Weight, -) -> u32 { - use sp_std::cmp::Ordering; - - if size.nominators < 1 { - return size.nominators; - } - - let max_voters = size.nominators.max(1); - let mut voters = max_voters; - - // helper closures. - let weight_with = |voters: u32| -> Weight { - W::submit_solution_better( - size.validators.into(), - size.nominators.into(), - voters, - winners_len, - ) - }; - - let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - match current_weight.cmp(&max_weight) { - Ordering::Less => { - let next_voters = voters.checked_add(step); - match next_voters { - Some(voters) if voters < max_voters => Ok(voters), - _ => Err(()), - } - }, - Ordering::Greater => voters.checked_sub(step).ok_or(()), - Ordering::Equal => Ok(voters), - } - }; - - // First binary-search the right amount of voters - let mut step = voters / 2; - let mut current_weight = weight_with(voters); - while step > 0 { - match next_voters(current_weight, voters, step) { - // proceed with the binary search - Ok(next) if next != voters => { - voters = next; - }, - // we are out of bounds, break out of the loop. - Err(()) => { - break; - }, - // we found the right value - early exit the function. - Ok(next) => return next - } - step = step / 2; - current_weight = weight_with(voters); - } - - - // Time to finish. - // We might have reduced less than expected due to rounding error. 
Increase one last time if we - // have any room left, the reduce until we are sure we are below limit. - while voters + 1 <= max_voters && weight_with(voters + 1) < max_weight { - voters += 1; - } - while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight { - voters -= 1; - } - - debug_assert!( - weight_with(voters.min(size.nominators)) <= max_weight, - "weight_with({}) <= {}", voters.min(size.nominators), max_weight, - ); - voters.min(size.nominators) -} - -/// Greedily reduce the size of the a solution to fit into the block, w.r.t. weight. -/// -/// The weight of the solution is foremost a function of the number of voters (i.e. -/// `compact.len()`). Aside from this, the other components of the weight are invariant. The number -/// of winners shall not be changed (otherwise the solution is invalid) and the `ElectionSize` is -/// merely a representation of the total number of stakers. -/// -/// Thus, we reside to stripping away some voters. This means only changing the `compact` struct. -/// -/// Note that the solution is already computed, and the winners are elected based on the merit of -/// teh entire stake in the system. Nonetheless, some of the voters will be removed further down the -/// line. -/// -/// Indeed, the score must be computed **after** this step. If this step reduces the score too much, -/// then the solution will be discarded. -pub fn trim_to_weight( - maximum_allowed_voters: u32, - mut compact: CompactAssignments, - nominator_index: FN, -) -> Result -where - for<'r> FN: Fn(&'r T::AccountId) -> Option, -{ - match compact.voter_count().checked_sub(maximum_allowed_voters as usize) { - Some(to_remove) if to_remove > 0 => { - // grab all voters and sort them by least stake. - let balance_of = >::slashable_balance_of_fn(); - let mut voters_sorted = >::iter() - .map(|(who, _)| (who.clone(), balance_of(&who))) - .collect::>(); - voters_sorted.sort_by_key(|(_, y)| *y); - - // start removing from the least stake. 
Iterate until we know enough have been removed. - let mut removed = 0; - for (maybe_index, _stake) in voters_sorted - .iter() - .map(|(who, stake)| (nominator_index(&who), stake)) - { - let index = maybe_index.ok_or(OffchainElectionError::NominatorSnapshotCorrupt)?; - if compact.remove_voter(index) { - crate::log!( - trace, - "removed a voter at index {} with stake {:?} from compact to reduce the size", - index, - _stake, - ); - removed += 1 - } - - if removed >= to_remove { - break; - } - } - - crate::log!( - warn, - "{} nominators out of {} had to be removed from compact solution due to size \ - limits.", - removed, - compact.voter_count() + removed, - ); - Ok(compact) - } - _ => { - // nada, return as-is - crate::log!(info, "Compact solution did not get trimmed due to block weight limits.",); - Ok(compact) - } - } -} - -/// Takes an election result and spits out some data that can be submitted to the chain. -/// -/// This does a lot of stuff; read the inline comments. -pub fn prepare_submission( - assignments: Vec>, - winners: Vec<(T::AccountId, ExtendedBalance)>, - do_reduce: bool, - maximum_weight: Weight, -) -> Result< - (Vec, CompactAssignments, ElectionScore, ElectionSize), - OffchainElectionError, -> { - // make sure that the snapshot is available. - let snapshot_validators = - >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - let snapshot_nominators = - >::snapshot_nominators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - - // all helper closures that we'd ever need. 
- let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let nominator_at = |i: NominatorIndex| -> Option { - snapshot_nominators.get(i as usize).cloned() - }; - - let validator_at = |i: ValidatorIndex| -> Option { - snapshot_validators.get(i as usize).cloned() - }; - - // both conversions are safe; snapshots are not created if they exceed. - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - // Clean winners. - let winners = sp_npos_elections::to_without_backing(winners); - - // convert into absolute value and to obtain the reduced version. - let mut staked = sp_npos_elections::assignment_ratio_to_staked( - assignments, - >::slashable_balance_of_fn(), - ); - - // reduce - if do_reduce { - reduce(&mut staked); - } - - // Convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio_normalized(staked) - .map_err(|e| OffchainElectionError::from(e))?; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ) - .map_err(|e| OffchainElectionError::from(e))?; - - // potentially reduce the size of the compact to fit weight. 
- let maximum_allowed_voters = - maximum_compact_len::(winners.len() as u32, size, maximum_weight); - - crate::log!( - debug, - "Maximum weight = {:?} // current weight = {:?} // maximum voters = {:?} // current votes \ - = {:?}", - maximum_weight, - T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.voter_count() as u32, - winners.len() as u32, - ), - maximum_allowed_voters, - compact.voter_count(), - ); - - let compact = trim_to_weight::(maximum_allowed_voters, compact, &nominator_index)?; - - // re-compute the score. We re-create what the chain will do. This is a bit verbose and wastes - // CPU time, but it is necessary to ensure that the score that we claim is the same as the one - // calculated by the chain. - let score = { - let compact = compact.clone(); - let assignments = compact.into_assignment(nominator_at, validator_at).unwrap(); - let staked = sp_npos_elections::assignment_ratio_to_staked( - assignments.clone(), - >::slashable_balance_of_fn(), - ); - - let support_map = to_supports::(&winners, &staked) - .map_err(|_| OffchainElectionError::ElectionFailed)?; - support_map.evaluate() - }; - - // winners to index. Use a simple for loop for a more expressive early exit in case of error. 
- let mut winners_indexed: Vec = Vec::with_capacity(winners.len()); - for w in winners { - if let Some(idx) = snapshot_validators.iter().position(|v| *v == w) { - let compact_index: ValidatorIndex = idx - .try_into() - .map_err(|_| OffchainElectionError::InvalidWinner)?; - winners_indexed.push(compact_index); - } else { - return Err(OffchainElectionError::InvalidWinner); - } - } - - Ok((winners_indexed, compact, score, size)) -} - -#[cfg(test)] -mod test { - #![allow(unused_variables)] - use super::*; - use crate::ElectionSize; - - struct Staking; - - impl crate::WeightInfo for Staking { - fn bond() -> Weight { - unimplemented!() - } - fn bond_extra() -> Weight { - unimplemented!() - } - fn unbond() -> Weight { - unimplemented!() - } - fn withdraw_unbonded_update(s: u32) -> Weight { - unimplemented!() - } - fn withdraw_unbonded_kill(s: u32) -> Weight { - unimplemented!() - } - fn validate() -> Weight { - unimplemented!() - } - fn nominate(n: u32) -> Weight { - unimplemented!() - } - fn chill() -> Weight { - unimplemented!() - } - fn set_payee() -> Weight { - unimplemented!() - } - fn set_controller() -> Weight { - unimplemented!() - } - fn set_validator_count() -> Weight { - unimplemented!() - } - fn force_no_eras() -> Weight { - unimplemented!() - } - fn force_new_era() -> Weight { - unimplemented!() - } - fn force_new_era_always() -> Weight { - unimplemented!() - } - fn set_invulnerables(v: u32) -> Weight { - unimplemented!() - } - fn force_unstake(s: u32) -> Weight { - unimplemented!() - } - fn cancel_deferred_slash(s: u32) -> Weight { - unimplemented!() - } - fn payout_stakers_dead_controller(n: u32) -> Weight { - unimplemented!() - } - fn payout_stakers_alive_staked(n: u32) -> Weight { - unimplemented!() - } - fn rebond(l: u32) -> Weight { - unimplemented!() - } - fn set_history_depth(e: u32) -> Weight { - unimplemented!() - } - fn reap_stash(s: u32) -> Weight { - unimplemented!() - } - fn new_era(v: u32, n: u32) -> Weight { - unimplemented!() - } - fn 
submit_solution_better(v: u32, n: u32, a: u32, w: u32) -> Weight { - (0 * v + 0 * n + 1000 * a + 0 * w) as Weight - } - fn kick(w: u32) -> Weight { - unimplemented!() - } - } - - #[test] - fn find_max_voter_binary_search_works() { - let size = ElectionSize { - validators: 0, - nominators: 10, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1990), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 2); - assert_eq!(maximum_compact_len::(0, size, 2001), 2); - assert_eq!(maximum_compact_len::(0, size, 2010), 2); - assert_eq!(maximum_compact_len::(0, size, 2990), 2); - assert_eq!(maximum_compact_len::(0, size, 2999), 2); - assert_eq!(maximum_compact_len::(0, size, 3000), 3); - assert_eq!(maximum_compact_len::(0, size, 3333), 3); - assert_eq!(maximum_compact_len::(0, size, 5500), 5); - assert_eq!(maximum_compact_len::(0, size, 7777), 7); - assert_eq!(maximum_compact_len::(0, size, 9999), 9); - assert_eq!(maximum_compact_len::(0, size, 10_000), 10); - assert_eq!(maximum_compact_len::(0, size, 10_999), 10); - assert_eq!(maximum_compact_len::(0, size, 11_000), 10); - assert_eq!(maximum_compact_len::(0, size, 22_000), 10); - - let size = ElectionSize { - validators: 0, - nominators: 1, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1990), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 1); - 
assert_eq!(maximum_compact_len::(0, size, 2001), 1); - assert_eq!(maximum_compact_len::(0, size, 2010), 1); - assert_eq!(maximum_compact_len::(0, size, 3333), 1); - - let size = ElectionSize { - validators: 0, - nominators: 2, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 2); - assert_eq!(maximum_compact_len::(0, size, 2001), 2); - assert_eq!(maximum_compact_len::(0, size, 2010), 2); - assert_eq!(maximum_compact_len::(0, size, 3333), 2); - } -} diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index f6ee89704d8d2..c4daf88098e75 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -24,7 +24,6 @@ use frame_benchmarking::account; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; -use sp_npos_elections::*; const SEED: u32 = 0; @@ -112,10 +111,10 @@ pub fn create_validators( /// - `nominators`: number of bonded nominators. /// - `edge_per_nominator`: number of edge (vote) per nominator. /// - `randomize_stake`: whether to randomize the stakes. -/// - `to_nominate`: if `Some(n)`, only the first `n` bonded validator are voted upon. -/// Else, all of them are considered and `edge_per_nominator` random validators are voted for. +/// - `to_nominate`: if `Some(n)`, only the first `n` bonded validator are voted upon. Else, all of +/// them are considered and `edge_per_nominator` random validators are voted for. /// -/// Return the validators choosen to be nominated. +/// Return the validators chosen to be nominated. 
pub fn create_validators_with_nominators_for_era( validators: u32, nominators: u32, @@ -143,7 +142,7 @@ pub fn create_validators_with_nominators_for_era( } let to_nominate = to_nominate.unwrap_or(validators_stash.len() as u32) as usize; - let validator_choosen = validators_stash[0..to_nominate].to_vec(); + let validator_chosen = validators_stash[0..to_nominate].to_vec(); // Create nominators for j in 0 .. nominators { @@ -155,7 +154,7 @@ pub fn create_validators_with_nominators_for_era( )?; // Have them randomly validate - let mut available_validators = validator_choosen.clone(); + let mut available_validators = validator_chosen.clone(); let mut selected_validators: Vec<::Source> = Vec::with_capacity(edge_per_nominator); @@ -169,227 +168,10 @@ pub fn create_validators_with_nominators_for_era( ValidatorCount::put(validators); - Ok(validator_choosen) + Ok(validator_chosen) } - -/// Build a _really bad_ but acceptable solution for election. This should always yield a solution -/// which has a less score than the seq-phragmen. -pub fn get_weak_solution( - do_reduce: bool, -) -> (Vec, CompactAssignments, ElectionScore, ElectionSize) { - let mut backing_stake_of: BTreeMap> = BTreeMap::new(); - - // self stake - >::iter().for_each(|(who, _p)| { - *backing_stake_of.entry(who.clone()).or_insert_with(|| Zero::zero()) += - >::slashable_balance_of(&who) - }); - - // elect winners. We chose the.. least backed ones. - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .rev() - .cloned() - .take(>::validator_count() as usize) - .collect(); - - let mut staked_assignments: Vec> = Vec::new(); - // you could at this point start adding some of the nominator's stake, but for now we don't. - // This solution must be bad. - - // add self support to winners. 
- winners.iter().for_each(|w| { - staked_assignments.push(StakedAssignment { - who: w.clone(), - distribution: vec![( - w.clone(), - >::slashable_balance_of_vote_weight( - &w, - T::Currency::total_issuance(), - ).into(), - )], - }) - }); - - if do_reduce { - reduce(&mut staked_assignments); - } - - // helpers for building the compact - let snapshot_validators = >::snapshot_validators().unwrap(); - let snapshot_nominators = >::snapshot_nominators().unwrap(); - - let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - - // convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = assignment_staked_to_ratio_normalized(staked_assignments) - .expect("Failed to normalize"); - - // re-calculate score based on what the chain will decode. - let score = { - let staked = assignment_ratio_to_staked::<_, OffchainAccuracy, _>( - low_accuracy_assignment.clone(), - >::slashable_balance_of_fn(), - ); - - let support_map = - to_supports::(winners.as_slice(), staked.as_slice()).unwrap(); - support_map.evaluate() - }; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ) - .unwrap(); - - // winners to index. - let winners = winners - .into_iter() - .map(|w| { - snapshot_validators - .iter() - .position(|v| *v == w) - .unwrap() - .try_into() - .unwrap() - }) - .collect::>(); - - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - (winners, compact, score, size) -} - -/// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain -/// worker code. 
-pub fn get_seq_phragmen_solution( - do_reduce: bool, -) -> ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, -) { - let iters = offchain_election::get_balancing_iters::(); - - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = >::do_phragmen::(iters).unwrap(); - - offchain_election::prepare_submission::( - assignments, - winners, - do_reduce, - T::BlockWeights::get().max_block, - ) - .unwrap() -} - -/// Returns a solution in which only one winner is elected with just a self vote. -pub fn get_single_winner_solution( - winner: T::AccountId, -) -> Result< - ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, - ), - &'static str, -> { - let snapshot_validators = >::snapshot_validators().unwrap(); - let snapshot_nominators = >::snapshot_nominators().unwrap(); - - let val_index = snapshot_validators - .iter() - .position(|x| *x == winner) - .ok_or("not a validator")?; - let nom_index = snapshot_nominators - .iter() - .position(|x| *x == winner) - .ok_or("not a nominator")?; - - let stake = >::slashable_balance_of(&winner); - let stake = - ::to_vote(stake, T::Currency::total_issuance()) as ExtendedBalance; - - let val_index = val_index as ValidatorIndex; - let nom_index = nom_index as NominatorIndex; - - let winners = vec![val_index]; - let compact = CompactAssignments { - votes1: vec![(nom_index, val_index)], - ..Default::default() - }; - let score = [stake, stake, stake * stake]; - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - Ok((winners, compact, score, size)) -} - -/// get the active era. +/// get the current era. pub fn current_era() -> EraIndex { >::current_era().unwrap_or(0) } - -/// initialize the first era. -pub fn init_active_era() { - ActiveEra::put(ActiveEraInfo { - index: 1, - start: None, - }) -} - -/// Create random assignments for the given list of winners. 
Each assignment will have -/// MAX_NOMINATIONS edges. -pub fn create_assignments_for_offchain( - num_assignments: u32, - winners: Vec<::Source>, -) -> Result< - ( - Vec<(T::AccountId, ExtendedBalance)>, - Vec>, - ), - &'static str -> { - let ratio = OffchainAccuracy::from_rational_approximation(1, MAX_NOMINATIONS); - let assignments: Vec> = >::iter() - .take(num_assignments as usize) - .map(|(n, t)| Assignment { - who: n, - distribution: t.targets.iter().map(|v| (v.clone(), ratio)).collect(), - }) - .collect(); - - ensure!(assignments.len() == num_assignments as usize, "must bench for `a` assignments"); - - let winners = winners.into_iter().map(|v| { - (::lookup(v).unwrap(), 0) - }).collect(); - - Ok((winners, assignments)) -} diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 92e1862e39813..7a3ec19f8af2f 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -20,15 +20,18 @@ use super::*; use mock::*; use sp_runtime::{ - assert_eq_error_rate, traits::BadOrigin, + assert_eq_error_rate, + traits::{BadOrigin, Dispatchable}, }; use sp_staking::offence::OffenceDetails; use frame_support::{ assert_ok, assert_noop, StorageMap, - traits::{Currency, ReservableCurrency, OnInitialize, OnFinalize}, + traits::{Currency, ReservableCurrency, OnInitialize}, + weights::{extract_actual_weight, GetDispatchInfo}, }; use pallet_balances::Error as BalancesError; use substrate_test_utils::assert_eq_uvec; +use frame_election_provider_support::Support; #[test] fn force_unstake_works() { @@ -209,10 +212,10 @@ fn rewards_should_work() { individual: vec![(11, 100), (21, 50)].into_iter().collect(), } ); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + let part_for_10 = 
Perbill::from_rational::(1000, 1125); + let part_for_20 = Perbill::from_rational::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational::(375, 1375); start_session(2); start_session(3); @@ -598,8 +601,8 @@ fn nominators_also_get_slashed_pro_rata() { let slash_amount = slash_percent * exposed_stake; let validator_share = - Perbill::from_rational_approximation(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = Perbill::from_rational_approximation( + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = Perbill::from_rational( exposed_nominator, exposed_stake, ) * slash_amount; @@ -1806,7 +1809,7 @@ fn bond_with_little_staked_value_bounded() { } #[test] -fn bond_with_duplicate_vote_should_be_ignored_by_npos_election() { +fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { ExtBuilder::default() .validator_count(2) .nominate(false) @@ -1817,43 +1820,43 @@ fn bond_with_duplicate_vote_should_be_ignored_by_npos_election() { assert_ok!(Staking::chill(Origin::signed(100))); // make stakes equal. assert_ok!(Staking::bond_extra(Origin::signed(31), 999)); - + // ensure all have equal stake. assert_eq!( >::iter() .map(|(v, _)| (v, Staking::ledger(v - 1).unwrap().total)) .collect::>(), vec![(31, 1000), (21, 1000), (11, 1000)], ); + // no nominators shall exist. assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); - // give the man some money + // give the man some money. let initial_balance = 1000; - for i in [1, 2, 3, 4,].iter() { + for i in [1, 2, 3, 4].iter() { let _ = Balances::make_free_balance_be(i, initial_balance); } assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - // 11 should not be elected. All of these count as ONE vote. 
- assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31,])); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); - // winners should be 21 and 31. Otherwise this election is taking duplicates into account. - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(0).unwrap(); - let winners = sp_npos_elections::to_without_backing(winners); - - assert_eq!(winners, vec![31, 21]); - // only distribution to 21 and 31. - assert_eq!(assignments.iter().find(|a| a.who == 1).unwrap().distribution.len(), 2); + // winners should be 21 and 31. Otherwise this election is taking duplicates into + // account. + let supports = ::ElectionProvider::elect().unwrap().0; + assert_eq!( + supports, + vec![ + (21, Support { total: 1800, voters: vec![(21, 1000), (3, 400), (1, 400)] }), + (31, Support { total: 2200, voters: vec![(31, 1000), (3, 600), (1, 600)] }) + ], + ); }); } #[test] -fn bond_with_duplicate_vote_should_be_ignored_by_npos_election_elected() { +fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { // same as above but ensures that even when the duple is being elected, everything is sane. ExtBuilder::default() .validator_count(2) @@ -1863,39 +1866,39 @@ fn bond_with_duplicate_vote_should_be_ignored_by_npos_election_elected() { .execute_with(|| { // disable the nominator assert_ok!(Staking::chill(Origin::signed(100))); - // make stakes equal. + // 31/30 will have less stake assert_ok!(Staking::bond_extra(Origin::signed(31), 99)); - + // ensure all have equal stake. assert_eq!( >::iter() .map(|(v, _)| (v, Staking::ledger(v - 1).unwrap().total)) .collect::>(), vec![(31, 100), (21, 1000), (11, 1000)], ); + // no nominators shall exist. 
assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); - // give the man some money + // give the man some money. let initial_balance = 1000; for i in [1, 2, 3, 4,].iter() { let _ = Balances::make_free_balance_be(i, initial_balance); } assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31,])); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); - // winners should be 21 and 31. Otherwise this election is taking duplicates into account. - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(0).unwrap(); - - let winners = sp_npos_elections::to_without_backing(winners); - assert_eq!(winners, vec![21, 11]); - // only distribution to 21 and 31. - assert_eq!(assignments.iter().find(|a| a.who == 1).unwrap().distribution.len(), 2); + // winners should be 21 and 11. 
+ let supports = ::ElectionProvider::elect().unwrap().0; + assert_eq!( + supports, + vec![ + (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), + (21, Support { total: 2500, voters: vec![(21, 1000), (3, 1000), (1, 500)] }) + ], + ); }); } @@ -2905,1298 +2908,6 @@ fn remove_multi_deferred() { }) } -mod offchain_election { - use crate::*; - use codec::Encode; - use frame_support::{ - assert_noop, assert_ok, assert_err_with_weight, - dispatch::DispatchResultWithPostInfo, - }; - use sp_runtime::transaction_validity::TransactionSource; - use mock::*; - use parking_lot::RwLock; - use sp_core::offchain::{ - testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, - OffchainWorkerExt, TransactionPoolExt, OffchainDbExt, - }; - use sp_io::TestExternalities; - use sp_npos_elections::StakedAssignment; - use frame_support::traits::OffchainWorker; - use std::sync::Arc; - use substrate_test_utils::assert_eq_uvec; - - fn percent(x: u16) -> OffchainAccuracy { - OffchainAccuracy::from_percent(x) - } - - /// setup a new set of validators and nominator storage items independent of the parent mock - /// file. This produces a edge graph that can be reduced. - pub fn build_offchain_election_test_ext() { - for i in (10..=40).step_by(10) { - // Note: we respect the convention of the mock (10, 11 pairs etc.) since these accounts - // have corresponding keys in session which makes everything more ergonomic and - // realistic. - bond_validator(i + 1, i, 100); - } - - let mut voter = 1; - bond_nominator(voter, 1000 + voter, 100, vec![11]); - voter = 2; - bond_nominator(voter, 1000 + voter, 100, vec![11, 11]); - voter = 3; - bond_nominator(voter, 1000 + voter, 100, vec![21, 41]); - voter = 4; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - voter = 5; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - } - - /// convert an externalities to one that can handle offchain worker tests. 
- fn offchainify(ext: &mut TestExternalities, iterations: u32) -> Arc> { - let (offchain, offchain_state) = TestOffchainExt::new(); - let (pool, pool_state) = TestTransactionPoolExt::new(); - - let mut seed = [0_u8; 32]; - seed[0..4].copy_from_slice(&iterations.to_le_bytes()); - offchain_state.write().seed = seed; - - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - pool_state - } - - fn election_size() -> ElectionSize { - ElectionSize { - validators: Staking::snapshot_validators().unwrap().len() as ValidatorIndex, - nominators: Staking::snapshot_nominators().unwrap().len() as NominatorIndex, - } - } - - fn submit_solution( - origin: Origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - ) -> DispatchResultWithPostInfo { - Staking::submit_election_solution( - origin, - winners, - compact, - score, - current_era(), - election_size(), - ) - } - - #[test] - fn is_current_session_final_works() { - ExtBuilder::default() - .session_per_era(3) - .build() - .execute_with(|| { - mock::start_active_era(1); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), false); - - start_session(4); - assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), true); - - start_session(5); - assert_eq!(Session::current_index(), 5); - // era changed. 
- assert_eq!(Staking::current_era(), Some(2)); - assert_eq!(Staking::is_current_session_final(), false); - }) - } - - #[test] - fn offchain_window_is_triggered() { - ExtBuilder::default() - .session_per_era(5) - .period(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(7); - assert_session_era!(0, 0); - - run_to_block(10); - assert_session_era!(1, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(36); - assert_session_era!(3, 0); - - // fist era has session 0, which has 0 blocks length, so we have in total 40 blocks - // in the era. - run_to_block(37); - assert_session_era!(3, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(38); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(39); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(40); - assert_session_era!(4, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(86); - assert_session_era!(8, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - // second era onwards has 50 blocks per era. 
- run_to_block(87); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(87)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(90); - assert_session_era!(9, 2); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - }) - } - - #[test] - fn offchain_window_is_triggered_when_forcing() { - ExtBuilder::default() - .session_per_era(5) - .period(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(12); - ForceEra::put(Forcing::ForceNew); - run_to_block(13); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(17); // instead of 47 - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); - - run_to_block(20); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - }) - } - - #[test] - fn offchain_window_is_triggered_when_force_always() { - ExtBuilder::default() - .session_per_era(5) - .period(10) - .election_lookahead(3) - .build() - .execute_with(|| { - ForceEra::put(Forcing::ForceAlways); - run_to_block(16); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(17); // instead of 37 - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); - - run_to_block(20); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(26); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(27); // next one again - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(27)); - }) - } - - #[test] - fn offchain_window_closes_when_forcenone() { - ExtBuilder::default() - .session_per_era(5) - .period(10) - .election_lookahead(3) - .build() - .execute_with(|| { - ForceEra::put(Forcing::ForceNone); - - run_to_block(36); - assert_session_era!(3, 0); - 
assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // opens - run_to_block(37); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - assert!(Staking::is_current_session_final()); - assert!(Staking::snapshot_validators().is_some()); - - // closes normally - run_to_block(40); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(!Staking::is_current_session_final()); - assert!(Staking::snapshot_validators().is_none()); - assert_session_era!(4, 0); - - run_to_block(47); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(4, 0); - - run_to_block(57); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(5, 0); - - run_to_block(67); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // Will not open again as scheduled - run_to_block(87); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(8, 0); - - run_to_block(90); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(9, 0); - }) - } - - #[test] - fn offchain_window_on_chain_fallback_works() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - start_session(2); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - // some election must have happened by now. 
- assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let mock::Event::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::OnChain), - ); - }) - } - - #[test] - #[ignore] - fn offchain_wont_work_if_snapshot_fails() { - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // validate more than the limit - let limit: NominatorIndex = ValidatorIndex::max_value() as NominatorIndex + 1; - let ctrl = 1_000_000; - for i in 0..limit { - bond_validator((1000 + i).into(), (1000 + i + ctrl).into(), 100); - } - - // window stays closed since no snapshot was taken. - run_to_block(27); - assert!(Staking::snapshot_validators().is_none()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - }) - } - - #[test] - fn staking_is_locked_when_election_window_open() { - ExtBuilder::default() - .offchain_election_ext() - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - // given - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // chill et. al. are now not allowed. - assert_noop!( - Staking::chill(Origin::signed(10)), - Error::::CallNotAllowed, - ); - }) - } - - #[test] - fn signed_result_can_be_submitted() { - // should check that we have a new validator set normally, event says that it comes from - // offchain. 
- ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - assert!(Staking::snapshot_validators().is_some()); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let mock::Event::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::SolutionStored(ElectionCompute::Signed), - ); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let mock::Event::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn signed_result_can_be_submitted_later() { - // same as `signed_result_can_be_submitted` but at a later block. 
- ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(14); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution(Origin::signed(10), winners, compact, score)); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let mock::Event::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn early_solution_submission_is_rejected() { - // should check that we have a new validator set normally, event says that it comes from - // offchain. - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(11); - // submission is not yet allowed - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // create all the indices just to build the solution. - Staking::create_stakers_snapshot(); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - Staking::kill_stakers_snapshot(); - - assert_err_with_weight!( - Staking::submit_election_solution( - Origin::signed(10), - winners.clone(), - compact.clone(), - score, - current_era(), - ElectionSize::default(), - ), - Error::::OffchainElectionEarlySubmission, - Some(::DbWeight::get().reads(1)), - ); - }) - } - - #[test] - fn weak_solution_is_rejected() { - // A solution which is weaker than what we currently have on-chain is rejected. 
- ExtBuilder::default() - .offchain_election_ext() - .has_stakers(false) - .validator_count(4) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // a good solution - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a bad solution - let (compact, winners, score) = horrible_npos_solution(false); - assert_err_with_weight!( - submit_solution( - Origin::signed(10), - winners.clone(), - compact.clone(), - score, - ), - Error::::OffchainElectionWeakSubmission, - Some(::DbWeight::get().reads(3)) - ); - }) - } - - #[test] - fn better_solution_is_accepted() { - // A solution which is better than what we currently have on-chain is accepted. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // a meeeeh solution - let (compact, winners, score) = horrible_npos_solution(false); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a better solution - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - }) - } - - #[test] - fn offchain_worker_runs_when_window_open() { - // at the end of the first finalized block with ElectionStatus::open(_), it should execute. - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .build(); - let state = offchainify(&mut ext, 0); - ext.execute_with(|| { - run_to_block(12); - - // local key 11 is in the elected set. 
- assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(state.read().transactions.len(), 0); - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - _ => unreachable!(), - }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Ok(ValidTransaction { - priority: UnsignedPriority::get() + 1125, // the proposed slot stake. - requires: vec![], - provides: vec![("StakingOffchain", current_era()).encode()], - longevity: 3, - propagate: false, - }) - ) - }) - } - - #[test] - fn offchain_worker_runs_with_balancing() { - // Offchain worker balances based on the number provided by randomness. See the difference - // in the priority, which comes from the computed score. - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .max_offchain_iterations(2) - .build(); - let state = offchainify(&mut ext, 2); - ext.execute_with(|| { - run_to_block(12); - - // local key 11 is in the elected set. - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(state.read().transactions.len(), 0); - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - _ => unreachable!(), - }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Ok(ValidTransaction { - // the proposed slot stake, with balance_solution. 
- priority: UnsignedPriority::get() + 1250, - requires: vec![], - provides: vec![("StakingOffchain", active_era()).encode()], - longevity: 3, - propagate: false, - }) - ) - }) - } - - #[test] - fn mediocre_submission_from_authority_is_early_rejected() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext, 0); - ext.execute_with(|| { - run_to_block(12); - // put a good solution on-chain - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - ),); - - // now run the offchain worker in the same chain state. - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - _ => unreachable!(), - }; - - // pass this call to ValidateUnsigned - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Err( - InvalidTransaction::Custom(>::OffchainElectionWeakSubmission.as_u8()).into(), - ), - ) - }) - } - - #[test] - fn invalid_election_correct_number_of_winners() { - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_solution_size() { - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - 
run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ElectionSize::default(), - ), - Error::::OffchainElectionBogusElectionSize, - ); - }) - } - - #[test] - fn invalid_election_correct_number_of_winners_1() { - // if we have too little validators, then the number of candidates is the bound. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_correct_number_of_winners_2() { - // if we have too little validators, then the number of candidates is the bound. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - assert_eq!(winners.len(), 4); - - // all good. We chose 4 and it works. 
- assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - ),); - }) - } - - #[test] - fn invalid_election_out_of_bound_nominator_index() { - // A nominator index which is simply invalid - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 9 doesn't exist. - compact.votes1.push((9, 2)); - - // The error type sadly cannot be more specific now. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_out_of_bound_validator_index() { - // A validator index which is out of bound - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 4 doesn't exist. - compact.votes1.iter_mut().for_each(|(_, vidx)| if *vidx == 1 { *vidx = 4 }); - - // The error type sadly cannot be more specific now. 
- assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_out_of_bound_winner_index() { - // A winner index which is simply invalid - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, _, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 4 doesn't exist. - let winners = vec![0, 1, 2, 4]; - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinner, - ); - }) - } - - #[test] - fn invalid_election_non_winner_validator_index() { - // An edge that points to a correct validator index who is NOT a winner. This is very - // similar to the test that raises `OffchainElectionBogusNomination`. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) // we select only 2. - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, winners, score) = prepare_submission_with(false, true, 2, |a| { - // swap all 11 and 41s in the distribution with non-winners. Note that it is - // important that the count of winners and the count of unique targets remain - // valid. - a.iter_mut().for_each(| StakedAssignment { who, distribution } | - distribution.iter_mut().for_each(|(t, _)| { - if *t == 41 { *t = 31 } else { *t = 21 } - // if it is self vote, correct that. 
- if *who == 41 { *who = 31 } - if *who == 11 { *who = 21 } - }) - ); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusNomination, - ); - }) - } - - #[test] - fn offchain_election_unique_target_count_is_checked() { - // Number of unique targets and and winners.len must match. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) // we select only 2. - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - - let (compact, winners, score) = prepare_submission_with(false, true, 2, |a| { - a.iter_mut() - .find(|x| x.who == 5) - // just add any new target. - .map(|x| { - // old value. - assert_eq!(x.distribution, vec![(41, 100)]); - // new value. - x.distribution = vec![(21, 50), (41, 50)] - }); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_wrong_self_vote() { - // A self vote for someone else. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |a| { - // mutate a self vote to target someone else. 
That someone else is still among the - // winners - a.iter_mut().find(|x| x.who == 11).map(|x| { - x.distribution - .iter_mut() - .find(|y| y.0 == 11) - .map(|y| y.0 = 21) - }); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_election_wrong_self_vote_2() { - // A self validator voting for someone else next to self vote. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |a| { - // Remove the self vote. - a.retain(|x| x.who != 11); - // add is as a new double vote - a.push(StakedAssignment { - who: 11, - distribution: vec![(11, 50), (21, 50)], - }); - }); - - // This raises score issue. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_election_over_stake() { - // Someone's edge ratios sums to more than 100%. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you - // reduce. 
- let (mut compact, winners, score) = prepare_submission_with(true, false, 0, |_| {}); - - if let Some(c) = compact.votes3.iter_mut().find(|x| x.0 == 0) { - // by default it should have been (0, [(2, 33%), (1, 33%)], 0) - // now the sum is above 100% - c.1 = [(2, percent(66)), (1, percent(66))]; - } - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_under_stake() { - // at the time of this writing, we cannot under stake someone. The compact assignment works - // in a way that some of the stakes are presented by the submitter, and the last one is read - // from chain by subtracting the rest from total. Hence, the sum is always correct. - // This test is only here as a demonstration. - } - - #[test] - fn invalid_election_invalid_target_stealing() { - // A valid voter who voted for someone who is a candidate, and is a correct winner, but is - // actually NOT nominated by this nominator. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // 3 only voted for 20 and 40. We add a fake vote to 30. The stake sum is still - // correctly 100. - a.iter_mut() - .find(|x| x.who == 3) - .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusNomination, - ); - }) - } - - #[test] - fn nomination_slash_filter_is_checked() { - // If a nominator has voted for someone who has been recently slashed, that particular - // nomination should be disabled for the upcoming election. A solution must respect this - // rule. 
- ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - - // finalize the round with fallback. This is needed since all nominator submission - // are in era zero and we want this one to pass with no problems. - run_to_block(15); - - // go to the next session to trigger mock::start_era and bump the active era - run_to_block(20); - - // slash 10. This must happen outside of the election window. - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - on_offence_now( - &[OffenceDetails { - offender: (11, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); - - // validate 10 again for the next round. But this guy will not have the votes that - // it should have had from 1 and 2. - assert_ok!(Staking::validate( - Origin::signed(10), - Default::default() - )); - - // open the election window and create snapshots. - run_to_block(32); - - // a solution that has been prepared after the slash. - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // no one is allowed to vote for 10, except for itself. - a.into_iter() - .filter(|s| s.who != 11) - .for_each(|s| - assert!(s.distribution.iter().find(|(t, _)| *t == 11).is_none()) - ); - }); - - // can be submitted. - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a wrong solution. - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // add back the vote that has been filtered out. - a.push(StakedAssignment { - who: 1, - distribution: vec![(11, 100)] - }); - }); - - // is rejected. 
- assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionSlashedNomination, - ); - }) - } - - #[test] - fn invalid_election_wrong_score() { - // A valid voter who's total distributed stake is more than what they bond - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, mut score) = prepare_submission_with(true, true, 2, |_| {}); - score[0] += 1; - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusScore, - ); - }) - } - - #[test] - fn offchain_storage_is_set() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext, 0); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - - run_to_block(12); - - Staking::offchain_worker(12); - // it works - assert_eq!(state.read().transactions.len(), 1); - - // and it is set - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - }) - } - - #[test] - fn offchain_storage_prevents_duplicate() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let _ = offchainify(&mut ext, 0); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - - run_to_block(12); - - // first run -- ok - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Ok(()), - ); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - - // re-execute after the next. not allowed. 
- assert_eq!( - offchain_election::set_check_offchain_execution_status::(13), - Err("recently executed."), - ); - - // a fork like situation -- re-execute 10, 11, 12. But it won't go through. - assert_eq!( - offchain_election::set_check_offchain_execution_status::(10), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(11), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Err("recently executed."), - ); - }) - } - - #[test] - #[should_panic] - fn offence_is_blocked_when_window_open() { - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 10); - - // panic from the impl in mock - on_offence_now( - &[OffenceDetails { - offender: (10, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); - }) - } -} - #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { ExtBuilder::default().build_and_execute(|| { @@ -4267,11 +2978,14 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // * an invalid era to claim doesn't update last_reward // * double claim of one era fails ExtBuilder::default().nominate(true).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + let init_balance_10 = Balances::total_balance(&10); let init_balance_100 = Balances::total_balance(&100); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_100 = Perbill::from_rational_approximation::(125, 1125); + let part_for_10 = Perbill::from_rational::(1000, 1125); + let part_for_100 = Perbill::from_rational::(125, 1125); // Check 
state Payee::::insert(11, RewardDestination::Controller); @@ -4312,19 +3026,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, 0), // Fail: Era out of history - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, 2), // Fail: Double claim - Error::::AlreadyClaimed + Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, active_era), // Fail: Era not finished yet - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); // Era 0 can't be rewarded anymore and current era can't be rewarded yet @@ -4578,6 +3292,9 @@ fn test_payout_stakers() { fn payout_stakers_handles_basic_errors() { // Here we will test payouts handle all errors. 
ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + // Same setup as the test above let balance = 1000; bond_validator(11, 10, balance); // Default(64) @@ -4596,9 +3313,15 @@ fn payout_stakers_handles_basic_errors() { mock::start_active_era(2); // Wrong Era, too big - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 2), + Error::::InvalidEraToReward.with_weight(err_weight) + ); // Wrong Staker - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 10, 1), + Error::::NotStash.with_weight(err_weight) + ); for i in 3..100 { Staking::reward_by_ids(vec![(11, 1)]); @@ -4608,14 +3331,134 @@ fn payout_stakers_handles_basic_errors() { } // We are at era 99, with history depth of 84 // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
- assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 14), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 99), + Error::::InvalidEraToReward.with_weight(err_weight) + ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); // Can't claim again - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 15), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 98), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + }); +} + +#[test] +fn payout_stakers_handles_weight_refund() { + // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by + // `payout_stakers` to calculate the weight of each payout op. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let max_nom_rewarded = ::MaxNominatorRewardedPerValidator::get(); + // Make sure the configured value is meaningful for our use. + assert!(max_nom_rewarded >= 4); + let half_max_nom_rewarded = max_nom_rewarded / 2; + // Sanity check our max and half max nominator quantities. 
+ assert!(half_max_nom_rewarded > 0); + assert!(max_nom_rewarded > half_max_nom_rewarded); + + let max_nom_rewarded_weight + = ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); + let half_max_nom_rewarded_weight + = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); + let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); + assert!(zero_nom_payouts_weight > 0); + assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); + assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); + + let balance = 1000; + bond_validator(11, 10, balance); + + /* Era 1 */ + start_active_era(1); + + // Reward just the validator. + Staking::reward_by_ids(vec![(11, 1)]); + + // Add some `half_max_nom_rewarded` nominators who will start backing the validator in the + // next era. + for i in 0..half_max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + /* Era 2 */ + start_active_era(2); + + // Collect payouts when there are no nominators + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 1)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!( + extract_actual_weight(&result, &info), + zero_nom_payouts_weight + ); + + // The validator is not rewarded in this era; so there will be zero payouts to claim for this era. + + /* Era 3 */ + start_active_era(3); + + // Collect payouts for an era where the validator did not receive any points. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 2)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // Reward the validator and its nominators. 
+ Staking::reward_by_ids(vec![(11, 1)]); + + /* Era 4 */ + start_active_era(4); + + // Collect payouts when the validator has `half_max_nom_rewarded` nominators. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 3)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); + + // Add enough nominators so that we are at the limit. They will be active nominators + // in the next era. + for i in half_max_nom_rewarded..max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + /* Era 5 */ + start_active_era(5); + // We now have `max_nom_rewarded` nominators actively nominating our validator. + + // Reward the validator so we can collect for everyone in the next era. + Staking::reward_by_ids(vec![(11, 1)]); + + /* Era 6 */ + start_active_era(6); + + // Collect payouts when the validator had `half_max_nom_rewarded` nominators. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 5)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); + + // Try and collect payouts for an era that has already been collected. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 5)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert!(result.is_err()); + // When there is an error the consumed weight == weight when there are 0 nominator payouts. 
+ assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); }); } @@ -4704,39 +3547,6 @@ fn offences_weight_calculated_correctly() { }); } -#[test] -fn on_initialize_weight_is_correct() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - assert_eq!(Validators::::iter().count(), 0); - assert_eq!(Nominators::::iter().count(), 0); - // When this pallet has nothing, we do 4 reads each block - let base_weight = ::DbWeight::get().reads(4); - assert_eq!(base_weight, Staking::on_initialize(0)); - }); - - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - crate::tests::offchain_election::build_offchain_election_test_ext(); - run_to_block(11); - Staking::on_finalize(System::block_number()); - System::set_block_number((System::block_number() + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); - Session::on_initialize(System::block_number()); - - assert_eq!(Validators::::iter().count(), 4); - assert_eq!(Nominators::::iter().count(), 5); - // With 4 validators and 5 nominator, we should increase weight by: - // - (4 + 5) reads - // - 3 Writes - let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); - assert_eq!(final_weight, Staking::on_initialize(System::block_number())); - }); -} - #[test] fn payout_creates_controller() { ExtBuilder::default().has_stakers(false).build_and_execute(|| { @@ -5017,14 +3827,57 @@ fn do_not_die_when_active_is_ed() { }) } +#[test] +fn on_finalize_weight_is_nonzero() { + ExtBuilder::default().build_and_execute(|| { + let on_finalize_weight = ::DbWeight::get().reads(1); + assert!(Staking::on_initialize(1) >= on_finalize_weight); + }) +} + mod election_data_provider { use super::*; - use sp_election_providers::ElectionDataProvider; + use frame_election_provider_support::ElectionDataProvider; + + #[test] + fn targets_2sec_block() { + let mut validators = 1000; + while 
::WeightInfo::get_npos_targets(validators) + < 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + { + validators += 1; + } + + println!("Can create a snapshot of {} validators in 2sec block", validators); + } + + #[test] + fn voters_2sec_block() { + // we assume a network only wants up to 1000 validators in most cases, thus having 2000 + // candidates is as high as it gets. + let validators = 2000; + // we assume the worse case: each validator also has a slashing span. + let slashing_spans = validators; + let mut nominators = 1000; + + while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) + < 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + { + nominators += 1; + } + + println!( + "Can create a snapshot of {} nominators [{} validators, each 1 slashing] in 2sec block", + nominators, validators + ); + } #[test] fn voters_include_self_vote() { ExtBuilder::default().nominate(false).build().execute_with(|| { - assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters() + assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters(None) + .unwrap() + .0 .into_iter() .find(|(w, _, t)| { v == *w && t[0] == *w }) .is_some())) @@ -5036,7 +3889,9 @@ mod election_data_provider { ExtBuilder::default().build().execute_with(|| { assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( - >::voters() + >::voters(None) + .unwrap() + .0 .iter() .find(|x| x.0 == 101) .unwrap() @@ -5050,7 +3905,9 @@ mod election_data_provider { // 11 is gone. 
start_active_era(2); assert_eq!( - >::voters() + >::voters(None) + .unwrap() + .0 .iter() .find(|x| x.0 == 101) .unwrap() @@ -5061,7 +3918,9 @@ mod election_data_provider { // resubmit and it is back assert_ok!(Staking::nominate(Origin::signed(100), vec![11, 21])); assert_eq!( - >::voters() + >::voters(None) + .unwrap() + .0 .iter() .find(|x| x.0 == 101) .unwrap() @@ -5071,6 +3930,14 @@ mod election_data_provider { }) } + #[test] + fn respects_len_limits() { + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Staking::voters(Some(1)).unwrap_err(), "Voter snapshot too big"); + assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); + }); + } + #[test] fn estimate_next_election_works() { ExtBuilder::default().session_per_era(5).period(5).build().execute_with(|| { @@ -5086,7 +3953,7 @@ mod election_data_provider { assert_eq!(staking_events().len(), 1); assert_eq!( *staking_events().last().unwrap(), - RawEvent::StakingElection(ElectionCompute::OnChain) + RawEvent::StakingElection ); for b in 21..45 { @@ -5100,7 +3967,7 @@ mod election_data_provider { assert_eq!(staking_events().len(), 3); assert_eq!( *staking_events().last().unwrap(), - RawEvent::StakingElection(ElectionCompute::OnChain) + RawEvent::StakingElection ); }) } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 3489a10135427..d3274cad8050e 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-02-13, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-03-25, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -68,356 +68,367 @@ pub trait WeightInfo { fn set_history_depth(e: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; fn new_era(v: u32, n: u32, ) -> Weight; - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight; + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; + fn get_npos_targets(v: u32, ) -> Weight; } /// Weights for pallet_staking using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (81_642_000 as Weight) + (79_895_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (66_025_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (60_561_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (60_810_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) + (54_996_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (61_537_000 as Weight) - // Standard Error: 1_000 - .saturating_add((60_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) + (56_056_000 as Weight) + // Standard Error: 0 + .saturating_add((67_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (95_741_000 as Weight) + (90_267_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_754_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + 
.saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (21_009_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (16_345_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (31_832_000 as Weight) - // Standard Error: 15_000 - .saturating_add((19_418_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (27_080_000 as Weight) + // Standard Error: 14_000 + .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (34_304_000 as Weight) - // Standard Error: 20_000 - .saturating_add((5_643_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (29_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (20_103_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (15_771_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_858_000 as Weight) + (13_329_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (30_269_000 as Weight) + (29_807_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_444_000 as Weight) + (2_323_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_766_000 as Weight) + (2_528_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_724_000 as Weight) + (2_529_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_702_000 as Weight) + (2_527_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_914_000 as Weight) + (2_661_000 as Weight) // Standard Error: 0 .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (64_032_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + (64_650_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_903_394_000 as Weight) - // Standard Error: 391_000 - .saturating_add((34_834_000 as Weight).saturating_mul(s as Weight)) + (5_904_642_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - 
(141_724_000 as Weight) - // Standard Error: 24_000 - .saturating_add((53_018_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) + (131_368_000 as Weight) + // Standard Error: 17_000 + .saturating_add((52_611_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (159_994_000 as Weight) - // Standard Error: 28_000 - .saturating_add((67_746_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) + (165_079_000 as Weight) + // Standard Error: 27_000 + .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (42_177_000 as Weight) - // Standard Error: 1_000 - .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (37_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((34_151_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 71_000 + .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (68_377_000 as Weight) + (67_561_000 as Weight) // Standard Error: 0 - .saturating_add((2_757_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 908_000 - .saturating_add((588_562_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 45_000 - .saturating_add((83_485_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(9 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) + // Standard Error: 1_016_000 + .saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(13 as Weight)) + .saturating_add(T::DbWeight::get().writes(9 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 52_000 - .saturating_add((750_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 20_000 - .saturating_add((556_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 52_000 - 
.saturating_add((76_201_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 108_000 - .saturating_add((7_271_000 as Weight).saturating_mul(w as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + // Standard Error: 95_000 + .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 95_000 + .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_305_000 + .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + } + fn get_npos_targets(v: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 32_000 + .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } } // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (81_642_000 as Weight) + (79_895_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (66_025_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (60_561_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (60_810_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) 
+ (54_996_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (61_537_000 as Weight) - // Standard Error: 1_000 - .saturating_add((60_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + (56_056_000 as Weight) + // Standard Error: 0 + .saturating_add((67_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (95_741_000 as Weight) + (90_267_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_754_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (21_009_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + (16_345_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (31_832_000 as Weight) - // Standard Error: 15_000 - .saturating_add((19_418_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + (27_080_000 as Weight) + // Standard Error: 14_000 + .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - 
(34_304_000 as Weight) - // Standard Error: 20_000 - .saturating_add((5_643_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (29_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (20_103_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + (15_771_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_858_000 as Weight) + (13_329_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (30_269_000 as Weight) + (29_807_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_444_000 as Weight) + (2_323_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_766_000 as Weight) + (2_528_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_724_000 as Weight) + (2_529_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_702_000 as Weight) + (2_527_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_914_000 as Weight) + (2_661_000 as Weight) // Standard Error: 0 .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - 
(64_032_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + (64_650_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_903_394_000 as Weight) - // Standard Error: 391_000 - .saturating_add((34_834_000 as Weight).saturating_mul(s as Weight)) + (5_904_642_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (141_724_000 as Weight) - // Standard Error: 24_000 - .saturating_add((53_018_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + (131_368_000 as Weight) + // Standard Error: 17_000 + .saturating_add((52_611_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (159_994_000 as Weight) - // Standard Error: 28_000 - .saturating_add((67_746_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + (165_079_000 as Weight) + // Standard Error: 27_000 + .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as 
Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (42_177_000 as Weight) - // Standard Error: 1_000 - .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (37_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((34_151_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 71_000 + .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (68_377_000 as Weight) + (67_561_000 as Weight) // Standard Error: 0 - .saturating_add((2_757_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 908_000 - .saturating_add((588_562_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 45_000 - .saturating_add((83_485_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(9 as Weight)) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) + // Standard Error: 1_016_000 + 
.saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(13 as Weight)) + .saturating_add(RocksDbWeight::get().writes(9 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 52_000 - .saturating_add((750_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 20_000 - .saturating_add((556_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 52_000 - .saturating_add((76_201_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 108_000 - .saturating_add((7_271_000 as Weight).saturating_mul(w as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + // Standard Error: 95_000 + .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 95_000 + .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_305_000 + .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + 
.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + } + fn get_npos_targets(v: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 32_000 + .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } } diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index c7cc38a81c134..d840d45a7f430 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Sudo Module +//! # Sudo Pallet //! -//! - [`sudo::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! The Sudo module allows for a single account (called the "sudo key") +//! The Sudo pallet allows for a single account (called the "sudo key") //! to execute dispatchable functions that require a `Root` call //! or designate a new account to replace them as the sudo key. //! Only one account can be the sudo key at a time. @@ -31,7 +31,7 @@ //! //! ### Dispatchable Functions //! -//! Only the sudo key can call the dispatchable functions from the Sudo module. +//! Only the sudo key can call the dispatchable functions from the Sudo pallet. //! //! * `sudo` - Make a `Root` call to a dispatchable function. //! * `set_key` - Assign a new account to be the sudo key. @@ -40,8 +40,8 @@ //! //! ### Executing Privileged Functions //! -//! The Sudo module itself is not intended to be used within other modules. -//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other modules. +//! The Sudo pallet itself is not intended to be used within other pallets. +//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other pallets. //! 
You can execute these privileged functions by calling `sudo` with the sudo key account. //! Privileged functions cannot be directly executed via an extrinsic. //! @@ -49,40 +49,49 @@ //! //! ### Simple Code Snippet //! -//! This is an example of a module that exposes a privileged function: +//! This is an example of a pallet that exposes a privileged function: //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_root; //! -//! pub trait Config: frame_system::Config {} +//! #[frame_support::pallet] +//! pub mod logger { +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; +//! use super::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn privileged_function(origin) -> dispatch::DispatchResult { +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(PhantomData); +//! +//! #[pallet::hooks] +//! impl Hooks> for Pallet {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn privileged_function(origin: OriginFor) -> DispatchResultWithPostInfo { //! ensure_root(origin)?; //! //! // do something... //! -//! Ok(()) +//! Ok(().into()) //! } -//! } +//! } //! } //! # fn main() {} //! ``` //! //! ## Genesis Config //! -//! The Sudo module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Sudo pallet depends on the [`GenesisConfig`]. //! You need to set an initial superuser account as the sudo `key`. //! -//! ## Related Modules +//! ## Related Pallets //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Call`]: ./enum.Call.html -//! [`Config`]: ./trait.Config.html //! 
[`Origin`]: https://docs.substrate.dev/docs/substrate-types #![cfg_attr(not(feature = "std"), no_std)] @@ -91,35 +100,41 @@ use sp_std::prelude::*; use sp_runtime::{DispatchResult, traits::StaticLookup}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, + weights::GetDispatchInfo, + traits::UnfilteredDispatchable, }; -use frame_support::{ - weights::{Weight, GetDispatchInfo, Pays}, - traits::{UnfilteredDispatchable, Get}, - dispatch::DispatchResultWithPostInfo, -}; -use frame_system::ensure_signed; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub use pallet::*; - /// A sudo-able call. - type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; -} +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::{*, DispatchResult}; -decl_module! { - /// Sudo module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - fn deposit_event() = default; + /// A sudo-able call. + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; + } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { /// Authenticates the sudo key and dispatches a function call with `Root` origin. /// /// The dispatch origin for this call must be _Signed_. @@ -130,17 +145,20 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. 
/// # - #[weight = { + #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) - }] - fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { + })] + pub(crate) fn sudo( + origin: OriginFor, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -155,14 +173,18 @@ decl_module! { /// - O(1). /// - The weight of this call is defined by the caller. /// # - #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { + #[pallet::weight((*_weight, call.get_dispatch_info().class))] + pub(crate) fn sudo_unchecked_weight( + origin: OriginFor, + call: Box<::Call>, + _weight: Weight, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -176,14 +198,17 @@ decl_module! { /// - Limited storage reads. /// - One DB change. 
/// # - #[weight = 0] - fn set_key(origin, new: ::Source) -> DispatchResultWithPostInfo { + #[pallet::weight(0)] + pub(crate) fn set_key( + origin: OriginFor, + new: ::Source, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let new = T::Lookup::lookup(new)?; - Self::deposit_event(RawEvent::KeyChanged(Self::key())); + Self::deposit_event(Event::KeyChanged(Self::key())); >::put(new); // Sudo user does not pay a fee. Ok(Pays::No.into()) @@ -200,7 +225,7 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = { + #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( dispatch_info.weight @@ -209,8 +234,9 @@ decl_module! { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, ) - }] - fn sudo_as(origin, + })] + pub(crate) fn sudo_as( + origin: OriginFor, who: ::Source, call: Box<::Call> ) -> DispatchResultWithPostInfo { @@ -222,35 +248,55 @@ decl_module! { let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - Self::deposit_event(RawEvent::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } } -} -decl_event!( - pub enum Event where AccountId = ::AccountId { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { /// A sudo just took place. \[result\] Sudid(DispatchResult), /// The \[sudoer\] just switched identity; the old key is supplied. - KeyChanged(AccountId), + KeyChanged(T::AccountId), /// A sudo just took place. \[result\] SudoAsDone(DispatchResult), } -); -decl_storage! 
{ - trait Store for Module as Sudo { + #[pallet::error] + /// Error for the Sudo pallet + pub enum Error { + /// Sender must be the Sudo account + RequireSudo, + } + + /// The `AccountId` of the sudo key. + #[pallet::storage] + #[pallet::getter(fn key)] + pub(super) type Key = StorageValue<_, T::AccountId, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { /// The `AccountId` of the sudo key. - Key get(fn key) config(): T::AccountId; + pub key: T::AccountId, } -} -decl_error! { - /// Error for the Sudo module - pub enum Error for Module { - /// Sender must be the Sudo account - RequireSudo, + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + key: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.key); + } } } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 91cd03ac4756a..9aac0a129907f 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -18,7 +18,7 @@ //! Test utilities use super::*; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::{parameter_types, traits::GenesisBuild}; use sp_core::H256; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; @@ -27,52 +27,80 @@ use frame_support::traits::Filter; use frame_system::limits; // Logger module to track execution. +#[frame_support::pallet] pub mod logger { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; use super::*; - use frame_system::ensure_root; + #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + Into<::Event>; + type Event: From> + IsType<::Event>; } - decl_storage! 
{ - trait Store for Module as Logger { - AccountLog get(fn account_log): Vec; - I32Log get(fn i32_log): Vec; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(*weight)] + pub(crate) fn privileged_i32_log( + origin: OriginFor, + i: i32, + weight: Weight + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is `Root`. + ensure_root(origin)?; + >::append(i); + Self::deposit_event(Event::AppendI32(i, weight)); + Ok(().into()) } - } - decl_event! { - pub enum Event where AccountId = ::AccountId { - AppendI32(i32, Weight), - AppendI32AndAccount(AccountId, i32, Weight), + #[pallet::weight(*weight)] + pub(crate) fn non_privileged_log( + origin: OriginFor, + i: i32, + weight: Weight + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is some signed account. + let sender = ensure_signed(origin)?; + >::append(i); + >::append(sender.clone()); + Self::deposit_event(Event::AppendI32AndAccount(sender, i, weight)); + Ok(().into()) } } - decl_module! { - pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - #[weight = *weight] - fn privileged_i32_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is `Root`. - ensure_root(origin)?; - ::append(i); - Self::deposit_event(RawEvent::AppendI32(i, weight)); - } - - #[weight = *weight] - fn non_privileged_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is some signed account. 
- let sender = ensure_signed(origin)?; - ::append(i); - >::append(sender.clone()); - Self::deposit_event(RawEvent::AppendI32AndAccount(sender, i, weight)); - } - } + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { + AppendI32(i32, Weight), + AppendI32AndAccount(T::AccountId, i32, Weight), } + + #[pallet::storage] + #[pallet::getter(fn account_log)] + pub(super) type AccountLog = StorageValue< + _, + Vec, + ValueQuery + >; + + #[pallet::storage] + #[pallet::getter(fn i32_log)] + pub(super) type I32Log = StorageValue< + _, + Vec, + ValueQuery + >; } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -82,9 +110,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Sudo: sudo::{Module, Call, Config, Storage, Event}, - Logger: logger::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Sudo: sudo::{Pallet, Call, Config, Storage, Event}, + Logger: logger::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 4d2552b7b88b4..780e07676b29c 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -58,7 +58,7 @@ fn sudo_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo(Origin::signed(1), call)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); + let expected_event = TestEvent::sudo(Event::Sudid(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }) } @@ -97,7 +97,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); + let expected_event = TestEvent::sudo(Event::Sudid(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }) } @@ -124,11 +124,11 @@ fn set_key_emits_events_correctly() { // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(1)); + let expected_event = TestEvent::sudo(Event::KeyChanged(1)); assert!(System::events().iter().any(|a| a.event == expected_event)); // Double check. assert_ok!(Sudo::set_key(Origin::signed(2), 4)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(2)); + let expected_event = TestEvent::sudo(Event::KeyChanged(2)); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } @@ -164,7 +164,7 @@ fn sudo_as_emits_events_correctly() { // A non-privileged function will work when passed to `sudo_as` with the root `key`. 
let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(Ok(()))); + let expected_event = TestEvent::sudo(Event::SudoAsDone(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index abd68e4425d89..0951dbdea987d 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -19,90 +19,90 @@ mod parse; use frame_support_procedural_tools::syn_ext as ext; use frame_support_procedural_tools::{generate_crate_access, generate_hidden_includes}; -use parse::{ModuleDeclaration, RuntimeDefinition, WhereSection, ModulePart}; +use parse::{PalletDeclaration, RuntimeDefinition, WhereSection, PalletPart}; use proc_macro::TokenStream; use proc_macro2::{TokenStream as TokenStream2}; use quote::quote; use syn::{Ident, Result, TypePath}; use std::collections::HashMap; -/// The fixed name of the system module. -const SYSTEM_MODULE_NAME: &str = "System"; +/// The fixed name of the system pallet. +const SYSTEM_PALLET_NAME: &str = "System"; -/// The complete definition of a module with the resulting fixed index. +/// The complete definition of a pallet with the resulting fixed index. 
#[derive(Debug, Clone)] -pub struct Module { +pub struct Pallet { pub name: Ident, pub index: u8, - pub module: Ident, + pub pallet: Ident, pub instance: Option, - pub module_parts: Vec, + pub pallet_parts: Vec, } -impl Module { - /// Get resolved module parts - fn module_parts(&self) -> &[ModulePart] { - &self.module_parts +impl Pallet { + /// Get resolved pallet parts + fn pallet_parts(&self) -> &[PalletPart] { + &self.pallet_parts } /// Find matching parts - fn find_part(&self, name: &str) -> Option<&ModulePart> { - self.module_parts.iter().find(|part| part.name() == name) + fn find_part(&self, name: &str) -> Option<&PalletPart> { + self.pallet_parts.iter().find(|part| part.name() == name) } - /// Return whether module contains part + /// Return whether pallet contains part fn exists_part(&self, name: &str) -> bool { self.find_part(name).is_some() } } -/// Convert from the parsed module to their final information. -/// Assign index to each modules using same rules as rust for fieldless enum. +/// Convert from the parsed pallet to their final information. +/// Assign index to each pallet using same rules as rust for fieldless enum. /// I.e. implicit are assigned number incrementedly from last explicit or 0. 
-fn complete_modules(decl: impl Iterator) -> syn::Result> { +fn complete_pallets(decl: impl Iterator) -> syn::Result> { let mut indices = HashMap::new(); let mut last_index: Option = None; let mut names = HashMap::new(); decl - .map(|module| { - let final_index = match module.index { + .map(|pallet| { + let final_index = match pallet.index { Some(i) => i, None => last_index.map_or(Some(0), |i| i.checked_add(1)) .ok_or_else(|| { - let msg = "Module index doesn't fit into u8, index is 256"; - syn::Error::new(module.name.span(), msg) + let msg = "Pallet index doesn't fit into u8, index is 256"; + syn::Error::new(pallet.name.span(), msg) })?, }; last_index = Some(final_index); - if let Some(used_module) = indices.insert(final_index, module.name.clone()) { + if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) { let msg = format!( - "Module indices are conflicting: Both modules {} and {} are at index {}", - used_module, - module.name, + "Pallet indices are conflicting: Both pallets {} and {} are at index {}", + used_pallet, + pallet.name, final_index, ); - let mut err = syn::Error::new(used_module.span(), &msg); - err.combine(syn::Error::new(module.name.span(), msg)); + let mut err = syn::Error::new(used_pallet.span(), &msg); + err.combine(syn::Error::new(pallet.name.span(), msg)); return Err(err); } - if let Some(used_module) = names.insert(module.name.clone(), module.name.span()) { - let msg = "Two modules with the same name!"; + if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) { + let msg = "Two pallets with the same name!"; - let mut err = syn::Error::new(used_module, &msg); - err.combine(syn::Error::new(module.name.span(), &msg)); + let mut err = syn::Error::new(used_pallet, &msg); + err.combine(syn::Error::new(pallet.name.span(), &msg)); return Err(err); } - Ok(Module { - name: module.name, + Ok(Pallet { + name: pallet.name, index: final_index, - module: module.module, - instance: module.instance, - 
module_parts: module.module_parts, + pallet: pallet.pallet, + instance: pallet.instance, + pallet_parts: pallet.pallet_parts, }) }) .collect() @@ -124,55 +124,55 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result},`", + pallets_token.span, + "`System` pallet declaration is missing. \ + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ))?; let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); - let all_but_system_modules = modules.iter().filter(|module| module.name != SYSTEM_MODULE_NAME); + let all_but_system_pallets = pallets.iter().filter(|pallet| pallet.name != SYSTEM_PALLET_NAME); let outer_event = decl_outer_event( &name, - modules.iter(), + pallets.iter(), &scrate, )?; let outer_origin = decl_outer_origin( &name, - all_but_system_modules, - &system_module, + all_but_system_pallets, + &system_pallet, &scrate, )?; - let all_modules = decl_all_modules(&name, modules.iter()); - let module_to_index = decl_pallet_runtime_setup(&modules, &scrate); + let all_pallets = decl_all_pallets(&name, pallets.iter()); + let pallet_to_index = decl_pallet_runtime_setup(&pallets, &scrate); - let dispatch = decl_outer_dispatch(&name, modules.iter(), &scrate); - let metadata = decl_runtime_metadata(&name, modules.iter(), &scrate, &unchecked_extrinsic); - let outer_config = decl_outer_config(&name, modules.iter(), &scrate); + let dispatch = decl_outer_dispatch(&name, pallets.iter(), &scrate); + let metadata = decl_runtime_metadata(&name, pallets.iter(), &scrate, &unchecked_extrinsic); + let outer_config = decl_outer_config(&name, pallets.iter(), &scrate); let inherent = decl_outer_inherent( &block, &unchecked_extrinsic, - modules.iter(), + pallets.iter(), &scrate, ); - let validate_unsigned = decl_validate_unsigned(&name, modules.iter(), &scrate); + let validate_unsigned = 
decl_validate_unsigned(&name, pallets.iter(), &scrate); let integrity_test = decl_integrity_test(&scrate); let res = quote!( @@ -197,9 +197,9 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Result( runtime: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("ValidateUnsigned")) - .map(|module_declaration| &module_declaration.name); + let pallets_tokens = pallet_declarations + .filter(|pallet_declaration| pallet_declaration.exists_part("ValidateUnsigned")) + .map(|pallet_declaration| &pallet_declaration.name); quote!( #scrate::impl_outer_validate_unsigned!( impl ValidateUnsigned for #runtime { - #( #modules_tokens )* + #( #pallets_tokens )* } ); ) @@ -237,13 +237,13 @@ fn decl_validate_unsigned<'a>( fn decl_outer_inherent<'a>( block: &'a syn::TypePath, unchecked_extrinsic: &'a syn::TypePath, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations.filter_map(|module_declaration| { - let maybe_config_part = module_declaration.find_part("Inherent"); + let pallets_tokens = pallet_declarations.filter_map(|pallet_declaration| { + let maybe_config_part = pallet_declaration.find_part("Inherent"); maybe_config_part.map(|_| { - let name = &module_declaration.name; + let name = &pallet_declaration.name; quote!(#name,) }) }); @@ -253,7 +253,7 @@ fn decl_outer_inherent<'a>( Block = #block, UncheckedExtrinsic = #unchecked_extrinsic { - #(#modules_tokens)* + #(#pallets_tokens)* } ); ) @@ -261,37 +261,37 @@ fn decl_outer_inherent<'a>( fn decl_outer_config<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations - 
.filter_map(|module_declaration| { - module_declaration.find_part("Config").map(|part| { + let pallets_tokens = pallet_declarations + .filter_map(|pallet_declaration| { + pallet_declaration.find_part("Config").map(|part| { let transformed_generics: Vec<_> = part .generics .params .iter() .map(|param| quote!(<#param>)) .collect(); - (module_declaration, transformed_generics) + (pallet_declaration, transformed_generics) }) }) - .map(|(module_declaration, generics)| { - let module = &module_declaration.module; + .map(|(pallet_declaration, generics)| { + let pallet = &pallet_declaration.pallet; let name = Ident::new( - &format!("{}Config", module_declaration.name), - module_declaration.name.span(), + &format!("{}Config", pallet_declaration.name), + pallet_declaration.name.span(), ); - let instance = module_declaration.instance.as_ref().into_iter(); + let instance = pallet_declaration.instance.as_ref().into_iter(); quote!( #name => - #module #(#instance)* #(#generics)*, + #pallet #(#instance)* #(#generics)*, ) }); quote!( #scrate::impl_outer_config! 
{ - pub struct GenesisConfig for #runtime where AllModulesWithSystem = AllModulesWithSystem { - #(#modules_tokens)* + pub struct GenesisConfig for #runtime where AllPalletsWithSystem = AllPalletsWithSystem { + #(#pallets_tokens)* } } ) @@ -299,63 +299,63 @@ fn decl_outer_config<'a>( fn decl_runtime_metadata<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, extrinsic: &TypePath, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter_map(|module_declaration| { - module_declaration.find_part("Module").map(|_| { - let filtered_names: Vec<_> = module_declaration - .module_parts() + let pallets_tokens = pallet_declarations + .filter_map(|pallet_declaration| { + pallet_declaration.find_part("Pallet").map(|_| { + let filtered_names: Vec<_> = pallet_declaration + .pallet_parts() .iter() - .filter(|part| part.name() != "Module") + .filter(|part| part.name() != "Pallet") .map(|part| part.ident()) .collect(); - (module_declaration, filtered_names) + (pallet_declaration, filtered_names) }) }) - .map(|(module_declaration, filtered_names)| { - let module = &module_declaration.module; - let name = &module_declaration.name; - let instance = module_declaration + .map(|(pallet_declaration, filtered_names)| { + let pallet = &pallet_declaration.pallet; + let name = &pallet_declaration.name; + let instance = pallet_declaration .instance .as_ref() .map(|name| quote!(<#name>)) .into_iter(); - let index = module_declaration.index; + let index = pallet_declaration.index; quote!( - #module::Module #(#instance)* as #name { index #index } with #(#filtered_names)*, + #pallet::Pallet #(#instance)* as #name { index #index } with #(#filtered_names)*, ) }); quote!( #scrate::impl_runtime_metadata!{ - for #runtime with modules where Extrinsic = #extrinsic - #(#modules_tokens)* + for #runtime with pallets where Extrinsic = #extrinsic + #(#pallets_tokens)* } ) } fn decl_outer_dispatch<'a>( runtime: &'a Ident, 
- module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("Call")) - .map(|module_declaration| { - let module = &module_declaration.module; - let name = &module_declaration.name; - let index = module_declaration.index; - quote!(#[codec(index = #index)] #module::#name) + let pallets_tokens = pallet_declarations + .filter(|pallet_declaration| pallet_declaration.exists_part("Call")) + .map(|pallet_declaration| { + let pallet = &pallet_declaration.pallet; + let name = &pallet_declaration.name; + let index = pallet_declaration.index; + quote!(#[codec(index = #index)] #pallet::#name) }); quote!( #scrate::impl_outer_dispatch! { pub enum Call for #runtime where origin: Origin { - #(#modules_tokens,)* + #(#pallets_tokens,)* } } ) @@ -363,32 +363,32 @@ fn decl_outer_dispatch<'a>( fn decl_outer_origin<'a>( runtime_name: &'a Ident, - modules_except_system: impl Iterator, - system_module: &'a Module, + pallets_except_system: impl Iterator, + system_pallet: &'a Pallet, scrate: &'a TokenStream2, ) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in modules_except_system { - if let Some(module_entry) = module_declaration.find_part("Origin") { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; + let mut pallets_tokens = TokenStream2::new(); + for pallet_declaration in pallets_except_system { + if let Some(pallet_entry) = pallet_declaration.find_part("Origin") { + let pallet = &pallet_declaration.pallet; + let instance = pallet_declaration.instance.as_ref(); + let generics = &pallet_entry.generics; if instance.is_some() && generics.params.is_empty() { let msg = format!( - "Instantiable module with no generic `Origin` cannot \ - be constructed: module `{}` must have generic `Origin`", - 
module_declaration.name + "Instantiable pallet with no generic `Origin` cannot \ + be constructed: pallet `{}` must have generic `Origin`", + pallet_declaration.name ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); + return Err(syn::Error::new(pallet_declaration.name.span(), msg)); } - let index = module_declaration.index; - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); + let index = pallet_declaration.index; + let tokens = quote!(#[codec(index = #index)] #pallet #instance #generics,); + pallets_tokens.extend(tokens); } } - let system_name = &system_module.module; - let system_index = system_module.index; + let system_name = &system_pallet.pallet; + let system_index = system_pallet.index; Ok(quote!( #scrate::impl_outer_origin! { @@ -396,7 +396,7 @@ fn decl_outer_origin<'a>( system = #system_name, system_index = #system_index { - #modules_tokens + #pallets_tokens } } )) @@ -404,89 +404,99 @@ fn decl_outer_origin<'a>( fn decl_outer_event<'a>( runtime_name: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in module_declarations { - if let Some(module_entry) = module_declaration.find_part("Event") { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; + let mut pallets_tokens = TokenStream2::new(); + for pallet_declaration in pallet_declarations { + if let Some(pallet_entry) = pallet_declaration.find_part("Event") { + let pallet = &pallet_declaration.pallet; + let instance = pallet_declaration.instance.as_ref(); + let generics = &pallet_entry.generics; if instance.is_some() && generics.params.is_empty() { let msg = format!( - "Instantiable module with no generic `Event` cannot \ - be constructed: module `{}` must have generic `Event`", - 
module_declaration.name, + "Instantiable pallet with no generic `Event` cannot \ + be constructed: pallet `{}` must have generic `Event`", + pallet_declaration.name, ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); + return Err(syn::Error::new(pallet_declaration.name.span(), msg)); } - let index = module_declaration.index; - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); + let index = pallet_declaration.index; + let tokens = quote!(#[codec(index = #index)] #pallet #instance #generics,); + pallets_tokens.extend(tokens); } } Ok(quote!( #scrate::impl_outer_event! { pub enum Event for #runtime_name { - #modules_tokens + #pallets_tokens } } )) } -fn decl_all_modules<'a>( +fn decl_all_pallets<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, ) -> TokenStream2 { let mut types = TokenStream2::new(); let mut names = Vec::new(); - for module_declaration in module_declarations { - let type_name = &module_declaration.name; - let module = &module_declaration.module; + for pallet_declaration in pallet_declarations { + let type_name = &pallet_declaration.name; + let pallet = &pallet_declaration.pallet; let mut generics = vec![quote!(#runtime)]; generics.extend( - module_declaration + pallet_declaration .instance .iter() - .map(|name| quote!(#module::#name)), + .map(|name| quote!(#pallet::#name)), ); let type_decl = quote!( - pub type #type_name = #module::Module <#(#generics),*>; + pub type #type_name = #pallet::Pallet <#(#generics),*>; ); types.extend(type_decl); - names.push(&module_declaration.name); + names.push(&pallet_declaration.name); } // Make nested tuple structure like (((Babe, Consensus), Grandpa), ...) - // But ignore the system module. - let all_modules = names.iter() - .filter(|n| **n != SYSTEM_MODULE_NAME) + // But ignore the system pallet. 
+ let all_pallets = names.iter() + .filter(|n| **n != SYSTEM_PALLET_NAME) .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_modules_with_system = names.iter() + let all_pallets_with_system = names.iter() .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); quote!( #types /// All pallets included in the runtime as a nested tuple of types. /// Excludes the System pallet. - pub type AllModules = ( #all_modules ); + pub type AllPallets = ( #all_pallets ); /// All pallets included in the runtime as a nested tuple of types. - pub type AllModulesWithSystem = ( #all_modules_with_system ); + pub type AllPalletsWithSystem = ( #all_pallets_with_system ); + + /// All modules included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + #[deprecated(note = "use `AllPallets` instead")] + #[allow(dead_code)] + pub type AllModules = ( #all_pallets ); + /// All modules included in the runtime as a nested tuple of types. 
+ #[deprecated(note = "use `AllPalletsWithSystem` instead")] + #[allow(dead_code)] + pub type AllModulesWithSystem = ( #all_pallets_with_system ); ) } fn decl_pallet_runtime_setup( - module_declarations: &[Module], + pallet_declarations: &[Pallet], scrate: &TokenStream2, ) -> TokenStream2 { - let names = module_declarations.iter().map(|d| &d.name); - let names2 = module_declarations.iter().map(|d| &d.name); - let name_strings = module_declarations.iter().map(|d| d.name.to_string()); - let indices = module_declarations.iter() - .map(|module| module.index as usize); + let names = pallet_declarations.iter().map(|d| &d.name); + let names2 = pallet_declarations.iter().map(|d| &d.name); + let name_strings = pallet_declarations.iter().map(|d| d.name.to_string()); + let indices = pallet_declarations.iter() + .map(|pallet| pallet.index as usize); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -527,7 +537,7 @@ fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { #[test] pub fn runtime_integrity_tests() { - ::integrity_test(); + ::integrity_test(); } } ) diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 6d4ba6cdbf743..def207439b536 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -28,7 +28,7 @@ mod keyword { syn::custom_keyword!(Block); syn::custom_keyword!(NodeBlock); syn::custom_keyword!(UncheckedExtrinsic); - syn::custom_keyword!(Module); + syn::custom_keyword!(Pallet); syn::custom_keyword!(Call); syn::custom_keyword!(Storage); syn::custom_keyword!(Event); @@ -44,7 +44,7 @@ pub struct RuntimeDefinition { pub enum_token: Token![enum], pub name: Ident, pub where_section: WhereSection, - pub modules: ext::Braces>, + pub pallets: ext::Braces>, } impl Parse for RuntimeDefinition { @@ -54,7 +54,7 @@ impl Parse for RuntimeDefinition { enum_token: input.parse()?, name: 
input.parse()?, where_section: input.parse()?, - modules: input.parse()?, + pallets: input.parse()?, }) } } @@ -150,20 +150,20 @@ impl Parse for WhereDefinition { } #[derive(Debug, Clone)] -pub struct ModuleDeclaration { +pub struct PalletDeclaration { pub name: Ident, /// Optional fixed index (e.g. `MyPallet ... = 3,`) pub index: Option, - pub module: Ident, + pub pallet: Ident, pub instance: Option, - pub module_parts: Vec, + pub pallet_parts: Vec, } -impl Parse for ModuleDeclaration { +impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result { let name = input.parse()?; let _: Token![:] = input.parse()?; - let module = input.parse()?; + let pallet = input.parse()?; let instance = if input.peek(Token![::]) && input.peek3(Token![<]) { let _: Token![::] = input.parse()?; let _: Token![<] = input.parse()?; @@ -175,7 +175,7 @@ impl Parse for ModuleDeclaration { }; let _: Token![::] = input.parse()?; - let module_parts = parse_module_parts(input)?; + let pallet_parts = parse_pallet_parts(input)?; let index = if input.peek(Token![=]) { input.parse::()?; @@ -188,9 +188,9 @@ impl Parse for ModuleDeclaration { let parsed = Self { name, - module, + pallet, instance, - module_parts, + pallet_parts, index, }; @@ -198,14 +198,14 @@ impl Parse for ModuleDeclaration { } } -/// Parse [`ModulePart`]'s from a braces enclosed list that is split by commas, e.g. +/// Parse [`PalletPart`]'s from a braces enclosed list that is split by commas, e.g. /// /// `{ Call, Event }` -fn parse_module_parts(input: ParseStream) -> Result> { - let module_parts :ext::Braces> = input.parse()?; +fn parse_pallet_parts(input: ParseStream) -> Result> { + let pallet_parts :ext::Braces> = input.parse()?; let mut resolved = HashSet::new(); - for part in module_parts.content.inner.iter() { + for part in pallet_parts.content.inner.iter() { if !resolved.insert(part.name()) { let msg = format!( "`{}` was already declared before. 
Please remove the duplicate declaration", @@ -215,12 +215,12 @@ fn parse_module_parts(input: ParseStream) -> Result> { } } - Ok(module_parts.content.inner.into_iter().collect()) + Ok(pallet_parts.content.inner.into_iter().collect()) } #[derive(Debug, Clone)] -pub enum ModulePartKeyword { - Module(keyword::Module), +pub enum PalletPartKeyword { + Pallet(keyword::Pallet), Call(keyword::Call), Storage(keyword::Storage), Event(keyword::Event), @@ -230,12 +230,12 @@ pub enum ModulePartKeyword { ValidateUnsigned(keyword::ValidateUnsigned), } -impl Parse for ModulePartKeyword { +impl Parse for PalletPartKeyword { fn parse(input: ParseStream) -> Result { let lookahead = input.lookahead1(); - if lookahead.peek(keyword::Module) { - Ok(Self::Module(input.parse()?)) + if lookahead.peek(keyword::Pallet) { + Ok(Self::Pallet(input.parse()?)) } else if lookahead.peek(keyword::Call) { Ok(Self::Call(input.parse()?)) } else if lookahead.peek(keyword::Storage) { @@ -256,11 +256,11 @@ impl Parse for ModulePartKeyword { } } -impl ModulePartKeyword { +impl PalletPartKeyword { /// Returns the name of `Self`. fn name(&self) -> &'static str { match self { - Self::Module(_) => "Module", + Self::Pallet(_) => "Pallet", Self::Call(_) => "Call", Self::Storage(_) => "Storage", Self::Event(_) => "Event", @@ -276,21 +276,21 @@ impl ModulePartKeyword { Ident::new(self.name(), self.span()) } - /// Returns `true` if this module part is allowed to have generic arguments. + /// Returns `true` if this pallet part is allowed to have generic arguments. fn allows_generic(&self) -> bool { Self::all_generic_arg().iter().any(|n| *n == self.name()) } - /// Returns the names of all module parts that allow to have a generic argument. + /// Returns the names of all pallet parts that allow to have a generic argument. 
fn all_generic_arg() -> &'static [&'static str] { &["Event", "Origin", "Config"] } } -impl Spanned for ModulePartKeyword { +impl Spanned for PalletPartKeyword { fn span(&self) -> Span { match self { - Self::Module(inner) => inner.span(), + Self::Pallet(inner) => inner.span(), Self::Call(inner) => inner.span(), Self::Storage(inner) => inner.span(), Self::Event(inner) => inner.span(), @@ -303,21 +303,21 @@ impl Spanned for ModulePartKeyword { } #[derive(Debug, Clone)] -pub struct ModulePart { - pub keyword: ModulePartKeyword, +pub struct PalletPart { + pub keyword: PalletPartKeyword, pub generics: syn::Generics, } -impl Parse for ModulePart { +impl Parse for PalletPart { fn parse(input: ParseStream) -> Result { - let keyword: ModulePartKeyword = input.parse()?; + let keyword: PalletPartKeyword = input.parse()?; let generics: syn::Generics = input.parse()?; if !generics.params.is_empty() && !keyword.allows_generic() { - let valid_generics = ModulePart::format_names(ModulePartKeyword::all_generic_arg()); + let valid_generics = PalletPart::format_names(PalletPartKeyword::all_generic_arg()); let msg = format!( "`{}` is not allowed to have generics. \ - Only the following modules are allowed to have generics: {}.", + Only the following pallets are allowed to have generics: {}.", keyword.name(), valid_generics, ); @@ -331,18 +331,18 @@ impl Parse for ModulePart { } } -impl ModulePart { +impl PalletPart { pub fn format_names(names: &[&'static str]) -> String { let res: Vec<_> = names.iter().map(|s| format!("`{}`", s)).collect(); res.join(", ") } - /// The name of this module part. + /// The name of this pallet part. pub fn name(&self) -> &'static str { self.keyword.name() } - /// The name of this module part as `Ident`. + /// The name of this pallet part as `Ident`. 
pub fn ident(&self) -> Ident { self.keyword.ident() } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index e64a364d2951e..2aecc5b993928 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -192,7 +192,7 @@ use proc_macro::TokenStream; /// construct_runtime!( /// pub enum Runtime with ... { /// ..., -/// Example: example::{Module, Storage, ..., Config}, +/// Example: example::{Pallet, Storage, ..., Config}, /// ..., /// } /// ); @@ -258,13 +258,13 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// NodeBlock = runtime::Block, /// UncheckedExtrinsic = UncheckedExtrinsic /// { -/// System: system::{Module, Call, Event, Config} = 0, -/// Test: test::{Module, Call} = 1, -/// Test2: test_with_long_module::{Module, Event}, +/// System: system::{Pallet, Call, Event, Config} = 0, +/// Test: test::{Pallet, Call} = 1, +/// Test2: test_with_long_module::{Pallet, Event}, /// /// // Module with instances -/// Test3_Instance1: test3::::{Module, Call, Storage, Event, Config, Origin}, -/// Test3_DefaultInstance: test3::{Module, Call, Storage, Event, Config, Origin} = 4, +/// Test3_Instance1: test3::::{Pallet, Call, Storage, Event, Config, Origin}, +/// Test3_DefaultInstance: test3::{Pallet, Call, Storage, Event, Config, Origin} = 4, /// } /// ) /// ``` @@ -306,7 +306,7 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// # Type definitions /// /// * The macro generates a type alias for each pallet to their `Module` (or `Pallet`). -/// E.g. `type System = frame_system::Module` +/// E.g. 
`type System = frame_system::Pallet` #[proc_macro] pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 137e055405a38..301d3fc5d9fa8 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -108,26 +108,26 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { match *self { #( Self::#fn_name ( #( ref #args_name, )* ) => { - let base_weight = #fn_weight; + let __pallet_base_weight = #fn_weight; - let weight = < + let __pallet_weight = < dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )> - >::weigh_data(&base_weight, ( #( #args_name, )* )); + >::weigh_data(&__pallet_base_weight, ( #( #args_name, )* )); - let class = < + let __pallet_class = < dyn #frame_support::dispatch::ClassifyDispatch< ( #( & #args_type, )* ) > - >::classify_dispatch(&base_weight, ( #( #args_name, )* )); + >::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* )); - let pays_fee = < + let __pallet_pays_fee = < dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )> - >::pays_fee(&base_weight, ( #( #args_name, )* )); + >::pays_fee(&__pallet_base_weight, ( #( #args_name, )* )); #frame_support::dispatch::DispatchInfo { - weight, - class, - pays_fee, + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, } }, )* @@ -162,9 +162,13 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { ) -> #frame_support::dispatch::DispatchResultWithPostInfo { match self { #( - Self::#fn_name( #( #args_name, )* ) => + Self::#fn_name( #( #args_name, )* ) => { + #frame_support::sp_tracing::enter_span!( + #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) + ); <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) - .map(Into::into).map_err(Into::into), + 
.map(Into::into).map_err(Into::into) + }, )* Self::__Ignore(_, _) => { let _ = origin; // Use origin for empty Call enum @@ -182,6 +186,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { #[doc(hidden)] + #[allow(dead_code)] pub fn call_functions() -> &'static [#frame_support::dispatch::FunctionMetadata] { &[ #( #frame_support::dispatch::FunctionMetadata { diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 3976f2c602dde..2d12d5ecf9d46 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -55,6 +55,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #where_clause { fn on_finalize(n: ::BlockNumber) { + #frame_support::sp_tracing::enter_span!( + #frame_support::sp_tracing::trace_span!("on_finalize") + ); < Self as #frame_support::traits::Hooks< ::BlockNumber @@ -86,6 +89,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { fn on_initialize( n: ::BlockNumber ) -> #frame_support::weights::Weight { + #frame_support::sp_tracing::enter_span!( + #frame_support::sp_tracing::trace_span!("on_initialize") + ); < Self as #frame_support::traits::Hooks< ::BlockNumber @@ -99,6 +105,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #where_clause { fn on_runtime_upgrade() -> #frame_support::weights::Weight { + #frame_support::sp_tracing::enter_span!( + #frame_support::sp_tracing::trace_span!("on_runtime_update") + ); + // log info about the upgrade. 
let new_storage_version = #frame_support::crate_to_pallet_version!(); let pallet_name = < diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 47e4344c50d8e..fd3230edd1e74 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -102,6 +102,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { /// Type alias to `Pallet`, to be used by `construct_runtime`. /// /// Generated by `pallet` attribute macro. + #[deprecated(note = "use `Pallet` instead")] + #[allow(dead_code)] pub type Module<#type_decl_gen> = #pallet_ident<#type_use_gen>; // Implement `GetPalletVersion` for `Pallet` diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 7948fca2faf06..86fb84b339b24 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -78,6 +78,8 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let gen = &def.type_use_generics(storage.attr_span); let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + let cfg_attrs = &storage.cfg_attrs; + let metadata_trait = match &storage.metadata { Metadata::Value { .. 
} => quote::quote_spanned!(storage.attr_span => #frame_support::storage::types::StorageValueMetadata @@ -128,7 +130,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { }; quote::quote_spanned!(storage.attr_span => - #frame_support::metadata::StorageEntryMetadata { + #(#cfg_attrs)* #frame_support::metadata::StorageEntryMetadata { name: #frame_support::metadata::DecodeDifferent::Encode( <#full_ident as #metadata_trait>::NAME ), @@ -159,6 +161,8 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let type_use_gen = &def.type_use_generics(storage.attr_span); let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + let cfg_attrs = &storage.cfg_attrs; + match &storage.metadata { Metadata::Value { value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { @@ -168,6 +172,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter() -> #query { @@ -186,6 +191,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter(k: KArg) -> #query where @@ -206,6 +212,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter(k1: KArg1, k2: KArg2) -> #query where @@ -233,10 +240,14 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let prefix_struct_const = storage_def.ident.to_string(); let 
config_where_clause = &def.config.where_clause; + let cfg_attrs = &storage_def.cfg_attrs; + quote::quote_spanned!(storage_def.attr_span => + #(#cfg_attrs)* #prefix_struct_vis struct #prefix_struct_ident<#type_use_gen>( core::marker::PhantomData<(#type_use_gen,)> ); + #(#cfg_attrs)* impl<#type_impl_gen> #frame_support::traits::StorageInstance for #prefix_struct_ident<#type_use_gen> #config_where_clause diff --git a/frame/support/procedural/src/pallet/expand/store_trait.rs b/frame/support/procedural/src/pallet/expand/store_trait.rs index cdc7e2837245f..81ed52ac87a68 100644 --- a/frame/support/procedural/src/pallet/expand/store_trait.rs +++ b/frame/support/procedural/src/pallet/expand/store_trait.rs @@ -37,10 +37,12 @@ pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { let completed_where_clause = super::merge_where_clauses(&where_clauses); let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); + let storage_cfg_attrs = &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); quote::quote_spanned!(trait_store.span() => #trait_vis trait #trait_store { #( + #(#storage_cfg_attrs)* type #storage_names; )* } @@ -48,6 +50,7 @@ pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { #completed_where_clause { #( + #(#storage_cfg_attrs)* type #storage_names = #storage_names<#type_use_gen>; )* } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index c3f6751ef70b6..23406aeb23431 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -172,7 +172,7 @@ impl CallDef { } let mut call_var_attrs: Vec = - helper::take_item_attrs(&mut method.attrs)?; + helper::take_item_pallet_attrs(&mut method.attrs)?; if call_var_attrs.len() != 1 { let msg = if call_var_attrs.is_empty() { @@ -193,7 +193,7 @@ impl CallDef { }; let arg_attrs: Vec = - helper::take_item_attrs(&mut arg.attrs)?; + 
helper::take_item_pallet_attrs(&mut arg.attrs)?; if arg_attrs.len() > 1 { let msg = "Invalid pallet::call, argument has too many attributes"; diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 44525164f03d5..045f2bff50e45 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -309,7 +309,7 @@ impl ConfigDef { || check_event_type(frame_system, trait_item, has_instance)?; // Parse for constant - let type_attrs_const: Vec = helper::take_item_attrs(trait_item)?; + let type_attrs_const: Vec = helper::take_item_pallet_attrs(trait_item)?; if type_attrs_const.len() > 1 { let msg = "Invalid attribute in pallet::config, only one attribute is expected"; @@ -339,7 +339,7 @@ impl ConfigDef { } } - let attr: Option = helper::take_first_item_attr( + let attr: Option = helper::take_first_item_pallet_attr( &mut item.attrs )?; diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs index 7d8b7d075ef23..e5aad2b5b5d2c 100644 --- a/frame/support/procedural/src/pallet/parse/event.rs +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -163,7 +163,7 @@ impl EventDef { return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) }; - let event_attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + let event_attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; let attr_info = PalletEventAttrInfo::from_attrs(event_attrs)?; let metadata = attr_info.metadata.unwrap_or_else(Vec::new); let deposit_event = attr_info.deposit_event; diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index b6ee5c614d6f6..3a7729c47e1d7 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -47,7 +47,7 @@ pub trait MutItemAttrs { } 
/// Take the first pallet attribute (e.g. attribute like `#[pallet..]`) and decode it to `Attr` -pub fn take_first_item_attr(item: &mut impl MutItemAttrs) -> syn::Result> where +pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::Result> where Attr: syn::parse::Parse, { let attrs = if let Some(attrs) = item.mut_item_attrs() { @@ -69,18 +69,29 @@ pub fn take_first_item_attr(item: &mut impl MutItemAttrs) -> syn::Result(item: &mut impl MutItemAttrs) -> syn::Result> where +pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> where Attr: syn::parse::Parse, { let mut pallet_attrs = Vec::new(); - while let Some(attr) = take_first_item_attr(item)? { + while let Some(attr) = take_first_item_pallet_attr(item)? { pallet_attrs.push(attr) } Ok(pallet_attrs) } +/// Get all the cfg attributes (e.g. attribute like `#[cfg..]`) and decode them to `Attr` +pub fn get_item_cfg_attrs(attrs: &[syn::Attribute]) -> Vec { + attrs.iter().filter_map(|attr| { + if attr.path.segments.first().map_or(false, |segment| segment.ident == "cfg") { + Some(attr.clone()) + } else { + None + } + }).collect::>() +} + impl MutItemAttrs for syn::Item { fn mut_item_attrs(&mut self) -> Option<&mut Vec> { match self { diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index 4d8f239ded0af..39a40fc148bcd 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -89,7 +89,7 @@ impl Def { let mut type_values = vec![]; for (index, item) in items.iter_mut().enumerate() { - let pallet_attr: Option = helper::take_first_item_attr(item)?; + let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { Some(PalletAttr::Config(span)) if config.is_none() => diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 1c979741d9803..6c2c90bd61a5f 100644 
--- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -78,7 +78,7 @@ impl PalletStructDef { return Err(syn::Error::new(item.span(), msg)); }; - let mut event_attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + let mut event_attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; if event_attrs.len() > 1 { let msg = "Invalid pallet::pallet, multiple argument pallet::generate_store found"; return Err(syn::Error::new(event_attrs[1].keyword.span(), msg)); diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index c0da266cfca2b..41ef337b76615 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -91,6 +91,8 @@ pub struct StorageDef { pub where_clause: Option, /// The span of the pallet::storage attribute. pub attr_span: proc_macro2::Span, + /// The `cfg` attributes. + pub cfg_attrs: Vec, } /// In `Foo` retrieve the argument at given position, i.e. A is argument at position 0. 
@@ -125,13 +127,15 @@ impl StorageDef { return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expected item type")); }; - let mut attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + let mut attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; if attrs.len() > 1 { let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; return Err(syn::Error::new(attrs[1].getter.span(), msg)); } let getter = attrs.pop().map(|attr| attr.getter); + let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); + let mut instances = vec![]; instances.push(helper::check_type_def_gen(&item.generics, item.ident.span())?); @@ -223,6 +227,7 @@ impl StorageDef { getter, query_kind, where_clause, + cfg_attrs, }) } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 64b7b7a8e2180..d6f133a8d20a3 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1375,11 +1375,11 @@ macro_rules! decl_module { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - /// Deposits an event using `frame_system::Module::deposit_event`. + /// Deposits an event using `frame_system::Pallet::deposit_event`. $vis fn deposit_event( event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::Event> ) { - <$system::Module<$trait_instance>>::deposit_event(event.into()) + <$system::Pallet<$trait_instance>>::deposit_event(event.into()) } } }; @@ -1859,6 +1859,11 @@ macro_rules! decl_module { >($crate::sp_std::marker::PhantomData<($trait_instance, $( $instance)?)>) where $( $other_where_bounds )*; + /// Type alias to `Module`, to be used by `construct_runtime`. + #[allow(dead_code)] + pub type Pallet<$trait_instance $(, $instance $( = $module_default_instance)?)?> + = $mod_type<$trait_instance $(, $instance)?>; + $crate::decl_module! { @impl_on_initialize { $system } @@ -1962,23 +1967,23 @@ macro_rules! 
decl_module { match *self { $( $call_type::$fn_name( $( ref $param_name ),* ) => { - let base_weight = $weight; - let weight = >::weigh_data( - &base_weight, + let __pallet_base_weight = $weight; + let __pallet_weight = >::weigh_data( + &__pallet_base_weight, ($( $param_name, )*) ); - let class = >::classify_dispatch( - &base_weight, + let __pallet_class = >::classify_dispatch( + &__pallet_base_weight, ($( $param_name, )*) ); - let pays_fee = >::pays_fee( - &base_weight, + let __pallet_pays_fee = >::pays_fee( + &__pallet_base_weight, ($( $param_name, )*) ); $crate::dispatch::DispatchInfo { - weight, - class, - pays_fee, + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, } }, )* diff --git a/frame/support/src/genesis_config.rs b/frame/support/src/genesis_config.rs index 8f915082e8bb0..3f7f943603e42 100644 --- a/frame/support/src/genesis_config.rs +++ b/frame/support/src/genesis_config.rs @@ -56,7 +56,7 @@ macro_rules! __impl_outer_config_types { /// specific genesis configuration. /// /// ```ignore -/// pub struct GenesisConfig for Runtime where AllModulesWithSystem = AllModulesWithSystem { +/// pub struct GenesisConfig for Runtime where AllPalletsWithSystem = AllPalletsWithSystem { /// rust_module_one: Option, /// ... /// } @@ -65,7 +65,7 @@ macro_rules! __impl_outer_config_types { macro_rules! impl_outer_config { ( pub struct $main:ident for $concrete:ident where - AllModulesWithSystem = $all_modules_with_system:ident + AllPalletsWithSystem = $all_pallets_with_system:ident { $( $config:ident => $snake:ident $( $instance:ident )? $( <$generic:ident> )*, )* @@ -103,7 +103,7 @@ macro_rules! 
impl_outer_config { )* $crate::BasicExternalities::execute_with_storage(storage, || { - <$all_modules_with_system as $crate::traits::OnGenesis>::on_genesis(); + <$all_pallets_with_system as $crate::traits::OnGenesis>::on_genesis(); }); Ok(()) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index a06fd7a1d9b92..362c4c5a0a73b 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -87,6 +87,124 @@ pub const LOG_TARGET: &'static str = "runtime::frame-support"; #[derive(Debug, PartialEq, Eq, Clone)] pub enum Never {} +/// Generate a new type alias for [`storage::types::value::StorageValue`], +/// [`storage::types::value::StorageMap`] and [`storage::types::value::StorageDoubleMap`]. +/// +/// Useful for creating a *storage-like* struct for test and migrations. +/// +///``` +/// # use frame_support::generate_storage_alias; +/// use frame_support::codec; +/// use frame_support::Twox64Concat; +/// // generate a storage value with type u32. +/// generate_storage_alias!(Prefix, StorageName => Value); +/// +/// // generate a double map from `(u32, u32)` (with hasher `Twox64Concat`) to `Vec` +/// generate_storage_alias!( +/// OtherPrefix, OtherStorageName => DoubleMap< +/// (u32, u32), +/// (u32, u32), +/// Vec +/// > +/// ); +/// +/// // generate a map from `Config::AccountId` (with hasher `Twox64Concat`) to `Vec` +/// trait Config { type AccountId: codec::FullCodec; } +/// generate_storage_alias!( +/// Prefix, GenericStorage => Map<(Twox64Concat, T::AccountId), Vec> +/// ); +/// # fn main() {} +///``` +#[macro_export] +macro_rules! generate_storage_alias { + // without generic for $name. + ($pallet:ident, $name:ident => Map<($key:ty, $hasher:ty), $value:ty>) => { + $crate::paste::paste! 
{ + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageMap< + [<$name Instance>], + $hasher, + $key, + $value, + >; + } + }; + ($pallet:ident, $name:ident => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageMap< + [<$name Instance>], + $hasher1, + $key1, + $hasher2, + $key2, + $value, + >; + } + }; + ($pallet:ident, $name:ident => Value<$value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + >; + } + }; + // with generic for $name. + ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Map<($key:ty, $hasher:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageMap< + [<$name Instance>], + $key, + $hasher, + $value, + >; + } + }; + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> + => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty>) + => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageMap< + [<$name Instance>], + $key1, + $hasher1, + $key2, + $hasher2, + $value, + >; + } + }; + ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Value<$value:ty>) => { + $crate::paste::paste! 
{ + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + $crate::storage::types::ValueQuery, + >; + } + }; + // helper used in all arms. + (@GENERATE_INSTANCE_STRUCT $pallet:ident, $name:ident) => { + $crate::paste::paste! { + struct [<$name Instance>]; + impl $crate::traits::StorageInstance for [<$name Instance>] { + fn pallet_prefix() -> &'static str { stringify!($pallet) } + const STORAGE_PREFIX: &'static str = stringify!($name); + } + } + } +} + /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// /// The so-called parameter type can be created in four different ways: @@ -1229,6 +1347,9 @@ pub mod pallet_prelude { /// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional /// logic. E.g. logic to write pallet version into storage. /// +/// NOTE: The macro also adds some tracing logic when implementing the above traits. The following +/// hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// /// # Call: `#[pallet::call]` mandatory /// /// Implementation of pallet dispatchables. @@ -1418,6 +1539,18 @@ pub mod pallet_prelude { /// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; /// ``` /// +/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. +/// +/// E.g: +/// ```ignore +/// #[cfg(feature = "my-feature")] +/// #[pallet::storage] +/// pub(super) type MyStorage = StorageValue<_, u32>; +/// ``` +/// +/// All the `cfg` attributes are automatically copied to the items generated for the storage, i.e. the +/// getter, storage prefix, and the metadata element etc. +/// /// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some type /// alias then the generation of the getter might fail. In this case the getter can be implemented /// manually. 
diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index 2edaba1cb47e9..d0c59a0dfdc1d 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -58,7 +58,7 @@ pub use frame_metadata::{ /// /// struct Runtime; /// frame_support::impl_runtime_metadata! { -/// for Runtime with modules where Extrinsic = UncheckedExtrinsic +/// for Runtime with pallets where Extrinsic = UncheckedExtrinsic /// module0::Module as Module0 { index 0 } with, /// module1::Module as Module1 { index 1 } with, /// module2::Module as Module2 { index 2 } with Storage, @@ -69,7 +69,7 @@ pub use frame_metadata::{ #[macro_export] macro_rules! impl_runtime_metadata { ( - for $runtime:ident with modules where Extrinsic = $ext:ident + for $runtime:ident with pallets where Extrinsic = $ext:ident $( $rest:tt )* ) => { impl $runtime { @@ -421,7 +421,7 @@ mod tests { impl crate::traits::PalletInfo for TestRuntime { fn index() -> Option { let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some(0) } if type_id == sp_std::any::TypeId::of::() { @@ -435,7 +435,7 @@ mod tests { } fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

=

::Upper; /// `X`_. pub trait PerThing: Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug + + ops::Div + ops::Mul + Pow { /// The data type used to build this per-thingy. type Inner: BaseArithmetic + Unsigned + Copy + Into + fmt::Debug; @@ -70,14 +72,14 @@ pub trait PerThing: fn from_percent(x: Self::Inner) -> Self { let a: Self::Inner = x.min(100.into()); let b: Self::Inner = 100.into(); - Self::from_rational_approximation::(a, b) + Self::from_rational::(a, b) } /// Return the product of multiplication of this value by itself. fn square(self) -> Self { let p = Self::Upper::from(self.deconstruct()); let q = Self::Upper::from(Self::ACCURACY); - Self::from_rational_approximation::(p * p, q * q) + Self::from_rational::(p * p, q * q) } /// Return the part left when `self` is saturating-subtracted from `Self::one()`. @@ -204,7 +206,12 @@ pub trait PerThing: /// Converts a fraction into `Self`. #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self; + fn from_float(x: f64) -> Self; + + /// Same as `Self::from_float`. + #[deprecated = "Use from_float instead"] + #[cfg(feature = "std")] + fn from_fraction(x: f64) -> Self { Self::from_float(x) } /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. /// @@ -219,16 +226,28 @@ pub trait PerThing: /// # fn main () { /// // 989/100 is technically closer to 99%. /// assert_eq!( - /// Percent::from_rational_approximation(989u64, 1000), + /// Percent::from_rational(989u64, 1000), /// Percent::from_parts(98), /// ); /// # } /// ``` - fn from_rational_approximation(p: N, q: N) -> Self + fn from_rational(p: N, q: N) -> Self where N: Clone + Ord + TryInto + TryInto + ops::Div + ops::Rem + ops::Add + Unsigned, Self::Inner: Into; + + /// Same as `Self::from_rational`. 
+ #[deprecated = "Use from_rational instead"] + fn from_rational_approximation(p: N, q: N) -> Self + where + N: Clone + Ord + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add + Unsigned + + Zero + One, + Self::Inner: Into, + { + Self::from_rational(p, q) + } } /// The rounding method to use. @@ -369,11 +388,11 @@ macro_rules! implement_per_thing { /// NOTE: saturate to 0 or 1 if x is beyond `[0, 1]` #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self { + fn from_float(x: f64) -> Self { Self::from_parts((x.max(0.).min(1.) * $max as f64) as Self::Inner) } - fn from_rational_approximation(p: N, q: N) -> Self + fn from_rational(p: N, q: N) -> Self where N: Clone + Ord + TryInto + TryInto + ops::Div + ops::Rem + ops::Add + Unsigned @@ -471,20 +490,31 @@ macro_rules! implement_per_thing { PerThing::square(self) } - /// See [`PerThing::from_fraction`]. + /// See [`PerThing::from_float`]. #[cfg(feature = "std")] - pub fn from_fraction(x: f64) -> Self { - ::from_fraction(x) + pub fn from_float(x: f64) -> Self { + ::from_float(x) } - /// See [`PerThing::from_rational_approximation`]. + /// See [`PerThing::from_rational`]. + #[deprecated = "Use `PerThing::from_rational` instead"] pub fn from_rational_approximation(p: N, q: N) -> Self where N: Clone + Ord + TryInto<$type> + TryInto<$upper_type> + ops::Div + ops::Rem + ops::Add + Unsigned, $type: Into, { - ::from_rational_approximation(p, q) + ::from_rational(p, q) + } + + /// See [`PerThing::from_rational`]. + pub fn from_rational(p: N, q: N) -> Self + where N: Clone + Ord + TryInto<$type> + + TryInto<$upper_type> + ops::Div + ops::Rem + + ops::Add + Unsigned, + $type: Into, + { + ::from_rational(p, q) } /// See [`PerThing::mul_floor`]. @@ -561,37 +591,13 @@ macro_rules! implement_per_thing { /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of /// overflowing. This operation is lossy. 
fn saturating_mul(self, rhs: Self) -> Self { - let a = self.0 as $upper_type; - let b = rhs.0 as $upper_type; - let m = <$upper_type>::from($max); - let parts = a * b / m; - // This will always fit into $type. - Self::from_parts(parts as $type) + self * rhs } /// Saturating exponentiation. Computes `self.pow(exp)`, saturating at the numeric /// bounds instead of overflowing. This operation is lossy. fn saturating_pow(self, exp: usize) -> Self { - if self.is_zero() || self.is_one() { - self - } else { - let p = <$name as PerThing>::Upper::from(self.deconstruct()); - let q = <$name as PerThing>::Upper::from(Self::ACCURACY); - let mut s = Self::one(); - for _ in 0..exp { - if s.is_zero() { - break; - } else { - // x^2 always fits in Self::Upper if x fits in Self::Inner. - // Verified by a test. - s = Self::from_rational_approximation( - <$name as PerThing>::Upper::from(s.deconstruct()) * p, - q * q, - ); - } - } - s - } + self.pow(exp) } } @@ -607,7 +613,7 @@ macro_rules! implement_per_thing { } } - impl crate::traits::Bounded for $name { + impl Bounded for $name { fn min_value() -> Self { ::zero() } @@ -617,13 +623,48 @@ macro_rules! implement_per_thing { } } + impl ops::Mul for $name { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + let a = self.0 as $upper_type; + let b = rhs.0 as $upper_type; + let m = <$upper_type>::from($max); + let parts = a * b / m; + // This will always fit into $type. 
+ Self::from_parts(parts as $type) + } + } + + impl Pow for $name { + type Output = Self; + + fn pow(self, exp: usize) -> Self::Output { + if exp == 0 || self.is_one() { + return Self::one() + } + let mut result = self; + let mut exp = exp - 1; + while exp > 0 && !result.is_zero() { + if exp % 2 == 0 { + result = result.square(); + exp /= 2; + } else { + result = result * self; + exp -= 1; + } + } + result + } + } + impl ops::Div for $name { type Output = Self; fn div(self, rhs: Self) -> Self::Output { let p = self.0; let q = rhs.0; - Self::from_rational_approximation(p, q) + Self::from_rational(p, q) } } @@ -648,6 +689,13 @@ macro_rules! implement_per_thing { } } + impl ops::Div for $name where $type: TryFrom { + type Output = Self; + fn div(self, b: N) -> Self::Output { + <$type>::try_from(b).map_or(Self::zero(), |d| Self::from_parts(self.0 / d)) + } + } + #[cfg(test)] mod $test_mod { use codec::{Encode, Decode}; @@ -657,13 +705,13 @@ macro_rules! implement_per_thing { #[test] fn macro_expanded_correctly() { // needed for the `from_percent` to work. UPDATE: this is no longer needed; yet note - // that tests that use percentage or fractions such as $name::from_fraction(0.2) to + // that tests that use percentage or fractions such as $name::from_float(0.2) to // create values will most likely be inaccurate when used with per_things that are // not multiples of 100. // assert!($max >= 100); // assert!($max % 100 == 0); - // needed for `from_rational_approximation` + // needed for `from_rational` assert!(2 * ($max as $upper_type) < <$upper_type>::max_value()); assert!(<$upper_type>::from($max) < <$upper_type>::max_value()); @@ -737,11 +785,11 @@ macro_rules! 
implement_per_thing { assert_eq!($name::from_percent(100), $name::from_parts($max)); assert_eq!($name::from_percent(200), $name::from_parts($max)); - assert_eq!($name::from_fraction(0.0), $name::from_parts(Zero::zero())); - assert_eq!($name::from_fraction(0.1), $name::from_parts($max / 10)); - assert_eq!($name::from_fraction(1.0), $name::from_parts($max)); - assert_eq!($name::from_fraction(2.0), $name::from_parts($max)); - assert_eq!($name::from_fraction(-1.0), $name::from_parts(Zero::zero())); + assert_eq!($name::from_float(0.0), $name::from_parts(Zero::zero())); + assert_eq!($name::from_float(0.1), $name::from_parts($max / 10)); + assert_eq!($name::from_float(1.0), $name::from_parts($max)); + assert_eq!($name::from_float(2.0), $name::from_parts($max)); + assert_eq!($name::from_float(-1.0), $name::from_parts(Zero::zero())); } #[test] @@ -763,7 +811,7 @@ macro_rules! implement_per_thing { ($num_type:tt) => { // multiplication from all sort of from_percent assert_eq!( - $name::from_fraction(1.0) * $num_type::max_value(), + $name::from_float(1.0) * $num_type::max_value(), $num_type::max_value() ); if $max % 100 == 0 { @@ -773,7 +821,7 @@ macro_rules! implement_per_thing { 1, ); assert_eq!( - $name::from_fraction(0.5) * $num_type::max_value(), + $name::from_float(0.5) * $num_type::max_value(), $num_type::max_value() / 2, ); assert_eq_error_rate!( @@ -783,30 +831,30 @@ macro_rules! 
implement_per_thing { ); } else { assert_eq!( - $name::from_fraction(0.99) * <$num_type>::max_value(), + $name::from_float(0.99) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.99).0) * + u256ify!($name::from_float(0.99).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() ) as $num_type, ); assert_eq!( - $name::from_fraction(0.50) * <$num_type>::max_value(), + $name::from_float(0.50) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.50).0) * + u256ify!($name::from_float(0.50).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() ) as $num_type, ); assert_eq!( - $name::from_fraction(0.01) * <$num_type>::max_value(), + $name::from_float(0.01) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.01).0) * + u256ify!($name::from_float(0.01).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() @@ -814,7 +862,7 @@ macro_rules! implement_per_thing { ); } - assert_eq!($name::from_fraction(0.0) * $num_type::max_value(), 0); + assert_eq!($name::from_float(0.0) * $num_type::max_value(), 0); // // multiplication with bounds assert_eq!($name::one() * $num_type::max_value(), $num_type::max_value()); @@ -828,7 +876,7 @@ macro_rules! implement_per_thing { // accuracy test assert_eq!( - $name::from_rational_approximation(1 as $type, 3) * 30 as $type, + $name::from_rational(1 as $type, 3) * 30 as $type, 10, ); @@ -837,10 +885,10 @@ macro_rules! implement_per_thing { #[test] fn per_thing_mul_rounds_to_nearest_number() { - assert_eq!($name::from_fraction(0.33) * 10u64, 3); - assert_eq!($name::from_fraction(0.34) * 10u64, 3); - assert_eq!($name::from_fraction(0.35) * 10u64, 3); - assert_eq!($name::from_fraction(0.36) * 10u64, 4); + assert_eq!($name::from_float(0.33) * 10u64, 3); + assert_eq!($name::from_float(0.34) * 10u64, 3); + assert_eq!($name::from_float(0.35) * 10u64, 3); + assert_eq!($name::from_float(0.36) * 10u64, 4); } #[test] @@ -858,33 +906,33 @@ macro_rules! 
implement_per_thing { ($num_type:tt) => { // within accuracy boundary assert_eq!( - $name::from_rational_approximation(1 as $num_type, 0), + $name::from_rational(1 as $num_type, 0), $name::one(), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 1), + $name::from_rational(1 as $num_type, 1), $name::one(), ); assert_eq_error_rate!( - $name::from_rational_approximation(1 as $num_type, 3).0, + $name::from_rational(1 as $num_type, 3).0, $name::from_parts($max / 3).0, 2 ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 10), - $name::from_fraction(0.10), + $name::from_rational(1 as $num_type, 10), + $name::from_float(0.10), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 4), - $name::from_fraction(0.25), + $name::from_rational(1 as $num_type, 4), + $name::from_float(0.25), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 4), - $name::from_rational_approximation(2 as $num_type, 8), + $name::from_rational(1 as $num_type, 4), + $name::from_rational(2 as $num_type, 8), ); // no accurate anymore but won't overflow. assert_eq_error_rate!( - $name::from_rational_approximation( + $name::from_rational( $num_type::max_value() - 1, $num_type::max_value() ).0 as $upper_type, @@ -892,7 +940,7 @@ macro_rules! implement_per_thing { 2, ); assert_eq_error_rate!( - $name::from_rational_approximation( + $name::from_rational( $num_type::max_value() / 3, $num_type::max_value() ).0 as $upper_type, @@ -900,7 +948,7 @@ macro_rules! implement_per_thing { 2, ); assert_eq!( - $name::from_rational_approximation(1, $num_type::max_value()), + $name::from_rational(1, $num_type::max_value()), $name::zero(), ); }; @@ -914,28 +962,28 @@ macro_rules! 
implement_per_thing { // almost at the edge assert_eq!( - $name::from_rational_approximation(max_value - 1, max_value + 1), + $name::from_rational(max_value - 1, max_value + 1), $name::from_parts($max - 2), ); assert_eq!( - $name::from_rational_approximation(1, $max - 1), + $name::from_rational(1, $max - 1), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(1, $max), + $name::from_rational(1, $max), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(2, 2 * max_value - 1), + $name::from_rational(2, 2 * max_value - 1), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(1, max_value + 1), + $name::from_rational(1, max_value + 1), $name::zero(), ); assert_eq!( - $name::from_rational_approximation(3 * max_value / 2, 3 * max_value), - $name::from_fraction(0.5), + $name::from_rational(3 * max_value / 2, 3 * max_value), + $name::from_float(0.5), ); $(per_thing_from_rationale_approx_test!($test_units);)* @@ -943,66 +991,66 @@ macro_rules! 
implement_per_thing { #[test] fn per_things_mul_operates_in_output_type() { - // assert_eq!($name::from_fraction(0.5) * 100u32, 50u32); - assert_eq!($name::from_fraction(0.5) * 100u64, 50u64); - assert_eq!($name::from_fraction(0.5) * 100u128, 50u128); + // assert_eq!($name::from_float(0.5) * 100u32, 50u32); + assert_eq!($name::from_float(0.5) * 100u64, 50u64); + assert_eq!($name::from_float(0.5) * 100u128, 50u128); } #[test] fn per_thing_saturating_op_works() { assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_add($name::from_fraction(0.4)).0 as $upper_type, - $name::from_fraction(0.9).0 as $upper_type, + $name::from_float(0.5).saturating_add($name::from_float(0.4)).0 as $upper_type, + $name::from_float(0.9).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_add($name::from_fraction(0.5)).0 as $upper_type, + $name::from_float(0.5).saturating_add($name::from_float(0.5)).0 as $upper_type, $name::one().0 as $upper_type, 2, ); assert_eq!( - $name::from_fraction(0.6).saturating_add($name::from_fraction(0.5)), + $name::from_float(0.6).saturating_add($name::from_float(0.5)), $name::one(), ); assert_eq_error_rate!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(0.1).0 as $upper_type, + $name::from_float(0.6).saturating_sub($name::from_float(0.5)).0 as $upper_type, + $name::from_float(0.1).0 as $upper_type, 2, ); assert_eq!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.6)), - $name::from_fraction(0.0), + $name::from_float(0.6).saturating_sub($name::from_float(0.6)), + $name::from_float(0.0), ); assert_eq!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.7)), - $name::from_fraction(0.0), + $name::from_float(0.6).saturating_sub($name::from_float(0.7)), + $name::from_float(0.0), ); assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_mul($name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(0.25).0 as 
$upper_type, + $name::from_float(0.5).saturating_mul($name::from_float(0.5)).0 as $upper_type, + $name::from_float(0.25).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.2).saturating_mul($name::from_fraction(0.2)).0 as $upper_type, - $name::from_fraction(0.04).0 as $upper_type, + $name::from_float(0.2).saturating_mul($name::from_float(0.2)).0 as $upper_type, + $name::from_float(0.04).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.1).saturating_mul($name::from_fraction(0.1)).0 as $upper_type, - $name::from_fraction(0.01).0 as $upper_type, + $name::from_float(0.1).saturating_mul($name::from_float(0.1)).0 as $upper_type, + $name::from_float(0.01).0 as $upper_type, 1, ); } #[test] fn per_thing_square_works() { - assert_eq!($name::from_fraction(1.0).square(), $name::from_fraction(1.0)); - assert_eq!($name::from_fraction(0.5).square(), $name::from_fraction(0.25)); - assert_eq!($name::from_fraction(0.1).square(), $name::from_fraction(0.01)); + assert_eq!($name::from_float(1.0).square(), $name::from_float(1.0)); + assert_eq!($name::from_float(0.5).square(), $name::from_float(0.25)); + assert_eq!($name::from_float(0.1).square(), $name::from_float(0.01)); assert_eq!( - $name::from_fraction(0.02).square(), + $name::from_float(0.02).square(), $name::from_parts((4 * <$upper_type>::from($max) / 100 / 100) as $type) ); } @@ -1011,30 +1059,30 @@ macro_rules! 
implement_per_thing { fn per_things_div_works() { // normal assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.20)).0 as $upper_type, - $name::from_fraction(0.50).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.20)).0 as $upper_type, + $name::from_float(0.50).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.10)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.10)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.0)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.0)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); // will not overflow assert_eq_error_rate!( - ($name::from_fraction(0.10) / $name::from_fraction(0.05)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.10) / $name::from_float(0.05)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(1.0) / $name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(1.0) / $name::from_float(0.5)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index b5efcfb02198a..b00cbada9f476 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::generic::BlockId; -use sp_runtime::Justification; +use sp_runtime::Justifications; use log::warn; use parking_lot::RwLock; @@ -84,8 +84,8 @@ pub trait HeaderBackend: Send + Sync { pub trait Backend: HeaderBackend + HeaderMetadata { /// Get 
block body. Returns `None` if block is not found. fn body(&self, id: BlockId) -> Result::Extrinsic>>>; - /// Get block justification. Returns `None` if justification does not exist. - fn justification(&self, id: BlockId) -> Result>; + /// Get block justifications. Returns `None` if no justification exists. + fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; /// Returns data cache reference, if it is enabled on this backend. @@ -216,15 +216,16 @@ pub trait Backend: HeaderBackend + HeaderMetadata Result::Extrinsic>>; + ) -> Result>>; - /// Check if extrinsic exists. - fn have_extrinsic(&self, hash: &Block::Hash) -> Result { - Ok(self.extrinsic(hash)?.is_some()) + /// Check if indexed transaction exists. + fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { + Ok(self.indexed_transaction(hash)?.is_some()) } } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 100c323024952..105c74bb317d7 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtim sp-inherents = { version = "3.0.0", default-features = false, path = "../../inherents" } sp-timestamp = { version = "3.0.0", default-features = false, path = "../../timestamp" } sp-consensus-slots = { version = "0.9.0", default-features = false, path = "../slots" } +sp-consensus = { version = "0.9.0", path = "../common", optional = true } [features] default = ["std"] @@ -32,4 +33,6 @@ std = [ "sp-runtime/std", "sp-inherents/std", "sp-timestamp/std", + "sp-consensus-slots/std", + "sp-consensus", ] diff --git a/primitives/consensus/aura/src/inherents.rs b/primitives/consensus/aura/src/inherents.rs index 750b13c77ff66..32af901311a30 100644 --- a/primitives/consensus/aura/src/inherents.rs +++ b/primitives/consensus/aura/src/inherents.rs @@ -51,12 +51,12 @@ impl 
AuraInherentData for InherentData { // TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot_duration: std::time::Duration, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(slot_duration: u64) -> Self { + pub fn new(slot_duration: std::time::Duration) -> Self { Self { slot_duration } @@ -88,7 +88,7 @@ impl ProvideInherentData for InherentDataProvider { use sp_timestamp::TimestampInherentData; let timestamp = inherent_data.timestamp_inherent_data()?; - let slot = *timestamp / self.slot_duration; + let slot = *timestamp / self.slot_duration.as_millis() as u64; inherent_data.put_data(INHERENT_IDENTIFIER, &slot) } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index 8c9c57567c43f..a28e681fda27f 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -84,14 +84,39 @@ pub enum ConsensusLog { sp_api::decl_runtime_apis! { /// API necessary for block authorship with aura. pub trait AuraApi { - /// Return the slot duration in seconds for Aura. - /// Currently, only the value provided by this type at genesis - /// will be used. + /// Returns the slot duration for Aura. /// - /// Dynamic slot duration may be supported in the future. - fn slot_duration() -> u64; + /// Currently, only the value provided by this type at genesis will be used. + fn slot_duration() -> SlotDuration; // Return the current set of authorities. fn authorities() -> Vec; } } + +/// Aura slot duration. +/// +/// Internally stored as milliseconds. +#[derive(sp_runtime::RuntimeDebug, Encode, Decode, PartialEq, Clone, Copy)] +pub struct SlotDuration(u64); + +impl SlotDuration { + /// Initialize from the given milliseconds. + pub fn from_millis(val: u64) -> Self { + Self(val) + } + + /// Returns the slot duration in milli seconds. 
+ pub fn get(&self) -> u64 { + self.0 + } +} + +#[cfg(feature = "std")] +impl sp_consensus::SlotData for SlotDuration { + fn slot_duration(&self) -> std::time::Duration { + std::time::Duration::from_millis(self.0) + } + + const SLOT_KEY: &'static [u8] = b"aura_slot_duration"; +} diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index 8aeab94df34a2..4c7c55f1cfd55 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -55,13 +55,13 @@ impl BabeInherentData for InherentData { // TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot_duration: std::time::Duration, } #[cfg(feature = "std")] impl InherentDataProvider { /// Constructs `Self` - pub fn new(slot_duration: u64) -> Self { + pub fn new(slot_duration: std::time::Duration) -> Self { Self { slot_duration } } } @@ -83,7 +83,7 @@ impl ProvideInherentData for InherentDataProvider { fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { let timestamp = inherent_data.timestamp_inherent_data()?; - let slot = *timestamp / self.slot_duration; + let slot = *timestamp / self.slot_duration.as_millis() as u64; inherent_data.put_data(INHERENT_IDENTIFIER, &slot) } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 1b416c996fcf0..da9f089e4561c 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -242,8 +242,8 @@ impl AllowedSlots { #[cfg(feature = "std")] impl sp_consensus::SlotData for BabeGenesisConfiguration { - fn slot_duration(&self) -> u64 { - self.slot_duration + fn slot_duration(&self) -> std::time::Duration { + std::time::Duration::from_millis(self.slot_duration) } const SLOT_KEY: &'static [u8] = b"babe_configuration"; diff --git a/primitives/consensus/common/Cargo.toml 
b/primitives/consensus/common/Cargo.toml index 8c5ae968158a2..6c3ae5fc060bf 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.35.1", default-features = false } +libp2p = { version = "0.36.0", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "3.0.0"} sp-inherents = { version = "3.0.0", path = "../../inherents" } @@ -34,6 +34,7 @@ parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} wasm-timer = "0.2.5" +async-trait = "0.1.42" [dev-dependencies] futures = "0.3.9" diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 00f84501dbb32..8d01da64b4cd6 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -18,7 +18,7 @@ //! Block import helpers. use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HashFor}; -use sp_runtime::Justification; +use sp_runtime::{Justification, Justifications}; use serde::{Serialize, Deserialize}; use std::borrow::Cow; use std::collections::HashMap; @@ -128,8 +128,8 @@ pub struct BlockImportParams { /// re-executed in a runtime that checks digest equivalence -- the /// post-runtime digests are pushed back on after. pub header: Block::Header, - /// Justification provided for this block from the outside. - pub justification: Option, + /// Justification(s) provided for this block from the outside. + pub justifications: Option, /// Digest items that have been added after the runtime for external /// work, like a consensus signature. 
pub post_digests: Vec>, @@ -146,7 +146,7 @@ pub struct BlockImportParams { /// Intermediate values that are interpreted by block importers. Each block importer, /// upon handling a value, removes it from the intermediate list. The final block importer /// rejects block import if there are still intermediate values that remain unhandled. - pub intermediates: HashMap, Box>, + pub intermediates: HashMap, Box>, /// Auxiliary consensus data produced by the block. /// Contains a list of key-value pairs. If values are `None`, the keys /// will be deleted. @@ -174,7 +174,7 @@ impl BlockImportParams { ) -> Self { Self { origin, header, - justification: None, + justifications: None, post_digests: Vec::new(), body: None, storage_changes: None, @@ -219,7 +219,7 @@ impl BlockImportParams { BlockImportParams { origin: self.origin, header: self.header, - justification: self.justification, + justifications: self.justifications, post_digests: self.post_digests, body: self.body, storage_changes: None, @@ -264,14 +264,15 @@ impl BlockImportParams { } /// Block import trait. +#[async_trait::async_trait] pub trait BlockImport { /// The error type. type Error: std::error::Error + Send + 'static; /// The transaction type used by the backend. - type Transaction; + type Transaction: Send + 'static; /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result; @@ -279,56 +280,64 @@ pub trait BlockImport { /// Import a block. /// /// Cached data can be accessed through the blockchain cache. - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result; } -impl BlockImport for crate::import_queue::BoxBlockImport { +#[async_trait::async_trait] +impl BlockImport for crate::import_queue::BoxBlockImport + where + Transaction: Send + 'static, +{ type Error = crate::error::Error; type Transaction = Transaction; /// Check block preconditions. 
- fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (**self).check_block(block) + (**self).check_block(block).await } /// Import a block. /// /// Cached data can be accessed through the blockchain cache. - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (**self).import_block(block, cache) + (**self).import_block(block, cache).await } } +#[async_trait::async_trait] impl BlockImport for Arc - where for<'r> &'r T: BlockImport + where + for<'r> &'r T: BlockImport, + T: Send + Sync, + Transaction: Send + 'static, { type Error = E; type Transaction = Transaction; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (&**self).check_block(block) + (&**self).check_block(block).await } - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (&**self).import_block(block, cache) + (&**self).import_block(block, cache).await } } diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 83f6271941fab..4220c7b14162d 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -28,7 +28,7 @@ use std::collections::HashMap; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}}; +use sp_runtime::{Justifications, traits::{Block as BlockT, Header as _, NumberFor}}; use crate::{ error::Error as ConsensusError, @@ -68,8 +68,8 @@ pub struct IncomingBlock { pub header: Option<::Header>, /// Block body if requested. pub body: Option::Extrinsic>>, - /// Justification if requested. - pub justification: Option, + /// Justification(s) if requested. + pub justifications: Option, /// The peer, we received this from pub origin: Option, /// Allow importing the block skipping state verification if parent state is missing. 
@@ -82,15 +82,16 @@ pub struct IncomingBlock { pub type CacheKeyId = [u8; 4]; /// Verify a justification of a block +#[async_trait::async_trait] pub trait Verifier: Send + Sync { /// Verify the given data and return the BlockImportParams and an optional /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, - justification: Option, + justifications: Option, body: Option>, ) -> Result<(BlockImportParams, Option)>>), String>; } @@ -102,13 +103,13 @@ pub trait Verifier: Send + Sync { pub trait ImportQueue: Send { /// Import bunch of blocks. fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); - /// Import a block justification. - fn import_justification( + /// Import block justifications. + fn import_justifications( &mut self, who: Origin, hash: B::Hash, number: NumberFor, - justification: Justification + justifications: Justifications ); /// Polls for actions to perform on the network. /// @@ -163,18 +164,18 @@ pub enum BlockImportError { } /// Single block import function. -pub fn import_single_block, Transaction>( - import_handle: &mut dyn BlockImport, +pub async fn import_single_block, Transaction: Send + 'static>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, ) -> Result>, BlockImportError> { - import_single_block_metered(import_handle, block_origin, block, verifier, None) + import_single_block_metered(import_handle, block_origin, block, verifier, None).await } /// Single block import function with metering. 
-pub(crate) fn import_single_block_metered, Transaction>( - import_handle: &mut dyn BlockImport, +pub(crate) async fn import_single_block_metered, Transaction: Send + 'static>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, @@ -182,8 +183,8 @@ pub(crate) fn import_single_block_metered, Transaction ) -> Result>, BlockImportError> { let peer = block.origin; - let (header, justification) = match (block.header, block.justification) { - (Some(header), justification) => (header, justification), + let (header, justifications) = match (block.header, block.justifications) { + (Some(header), justifications) => (header, justifications), (None, _) => { if let Some(ref peer) = peer { debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); @@ -232,24 +233,28 @@ pub(crate) fn import_single_block_metered, Transaction parent_hash, allow_missing_state: block.allow_missing_state, import_existing: block.import_existing, - }))? { + }).await)? { BlockImportResult::ImportedUnknown { .. } => (), r => return Ok(r), // Any other successful result means that the block is already imported. 
} let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justification, block.body) - .map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; + let (mut import_block, maybe_keys) = verifier.verify( + block_origin, + header, + justifications, + block.body + ).await.map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; if let Some(metrics) = metrics.as_ref() { metrics.report_verification(true, started.elapsed()); @@ -261,7 +266,7 @@ pub(crate) fn import_single_block_metered, Transaction } import_block.allow_missing_state = block.allow_missing_state; - let imported = import_handle.import_block(import_block.convert_transaction(), cache); + let imported = import_handle.import_block(import_block.convert_transaction(), cache).await; if let Some(metrics) = metrics.as_ref() { metrics.report_verification_and_import(started.elapsed()); } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index f1b42e1460e59..7998ba1b3ec76 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -18,7 +18,7 @@ use std::{pin::Pin, time::Duration, marker::PhantomData}; use 
futures::{prelude::*, task::Context, task::Poll}; use futures_timer::Delay; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; +use sp_runtime::{Justification, Justifications, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded, TracingUnboundedReceiver}; use prometheus_endpoint::Registry; @@ -112,22 +112,24 @@ impl ImportQueue for BasicQueue } } - fn import_justification( + fn import_justifications( &mut self, who: Origin, hash: B::Hash, number: NumberFor, - justification: Justification, + justifications: Justifications, ) { - let res = self.justification_sender.unbounded_send( - worker_messages::ImportJustification(who, hash, number, justification), - ); - - if res.is_err() { - log::error!( - target: "sync", - "import_justification: Background import task is no longer alive" + for justification in justifications { + let res = self.justification_sender.unbounded_send( + worker_messages::ImportJustification(who, hash, number, justification), ); + + if res.is_err() { + log::error!( + target: "sync", + "import_justification: Background import task is no longer alive" + ); + } } } @@ -153,7 +155,7 @@ mod worker_messages { /// to be run. /// /// Returns when `block_import` ended. 
-async fn block_import_process( +async fn block_import_process( mut block_import: BoxBlockImport, mut verifier: impl Verifier, mut result_sender: BufferedLinkSender, @@ -193,7 +195,7 @@ struct BlockImportWorker { } impl BlockImportWorker { - fn new, Transaction: Send>( + fn new, Transaction: Send + 'static>( result_sender: BufferedLinkSender, verifier: V, block_import: BoxBlockImport, @@ -281,7 +283,7 @@ impl BlockImportWorker { who: Origin, hash: B::Hash, number: NumberFor, - justification: Justification + justification: Justification, ) { let started = wasm_timer::Instant::now(); let success = self.justification_import.as_mut().map(|justification_import| { @@ -320,7 +322,7 @@ struct ImportManyBlocksResult { /// Import several blocks at once, returning import result for each block. /// /// This will yield after each imported block once, to ensure that other futures can be called as well. -async fn import_many_blocks, Transaction>( +async fn import_many_blocks, Transaction: Send + 'static>( import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, blocks: Vec>, @@ -369,7 +371,7 @@ async fn import_many_blocks, Transaction>( block, verifier, metrics.clone(), - ) + ).await }; if let Some(metrics) = metrics.as_ref() { @@ -437,30 +439,32 @@ mod tests { use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; use std::collections::HashMap; + #[async_trait::async_trait] impl Verifier for () { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: Header, - _justification: Option, + _justifications: Option, _body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { Ok((BlockImportParams::new(origin, header), None)) } } + #[async_trait::async_trait] impl BlockImport for () { type Error = crate::Error; type Transaction = Extrinsic; - fn check_block( + async fn check_block( &mut self, _block: BlockCheckParams, ) -> Result { Ok(ImportResult::imported(false)) } - fn import_block( + async fn import_block( &mut self, _block: 
BlockImportParams, _cache: HashMap>, @@ -541,7 +545,7 @@ mod tests { hash, header: Some(header), body: None, - justification: None, + justifications: None, origin: None, allow_missing_state: false, import_existing: false, @@ -554,12 +558,11 @@ mod tests { let mut import_justification = || { let hash = Hash::random(); - block_on(finality_sender.send(worker_messages::ImportJustification( libp2p::PeerId::random(), hash, 1, - Vec::new(), + (*b"TEST", Vec::new()), ))) .unwrap(); diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index b3aceb45e180f..27a43dbe02208 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -303,16 +303,8 @@ impl CanAuthorWith for NeverCanAuthor { /// A type from which a slot duration can be obtained. pub trait SlotData { /// Gets the slot duration. - fn slot_duration(&self) -> u64; + fn slot_duration(&self) -> sp_std::time::Duration; /// The static slot key const SLOT_KEY: &'static [u8]; } - -impl SlotData for u64 { - fn slot_duration(&self) -> u64 { - *self - } - - const SLOT_KEY: &'static [u8] = b"aura_slot_duration"; -} diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 95192acc4cb17..aedfbd748650c 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -586,8 +586,9 @@ ss58_address_format!( (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") CrustAccount => (66, "crust", "Crust Network, standard account (*25519).") + SocialAccount => + (252, "social-network", "Social Network, standard account (*25519).") // Note: 16384 and above are reserved. 
- ); /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index 4062ba292352f..aae7668b5ec80 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -13,3 +13,4 @@ readme = "README.md" [dependencies] parking_lot = "0.11.1" kvdb = "0.9.0" + diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index b50ca53786f9f..d99fe6360ef7b 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -33,18 +33,73 @@ fn handle_err(result: std::io::Result) -> T { } /// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn as_database(db: D) -> std::sync::Arc> { +pub fn as_database(db: D) -> std::sync::Arc> + where D: KeyValueDB + 'static, H: Clone + AsRef<[u8]> +{ std::sync::Arc::new(DbAdapter(db)) } -impl Database for DbAdapter { +impl DbAdapter { + // Returns counter key and counter value if it exists. + fn read_counter(&self, col: ColumnId, key: &[u8]) -> error::Result<(Vec, Option)> { + // Add a key suffix for the counter + let mut counter_key = key.to_vec(); + counter_key.push(0); + Ok(match self.0.get(col, &counter_key).map_err(|e| error::DatabaseError(Box::new(e)))? 
{ + Some(data) => { + let mut counter_data = [0; 4]; + if data.len() != 4 { + return Err(error::DatabaseError(Box::new( + std::io::Error::new(std::io::ErrorKind::Other, + format!("Unexpected counter len {}", data.len()))) + )) + } + counter_data.copy_from_slice(&data); + let counter = u32::from_le_bytes(counter_data); + (counter_key, Some(counter)) + }, + None => (counter_key, None) + }) + } +} + +impl> Database for DbAdapter { fn commit(&self, transaction: Transaction) -> error::Result<()> { let mut tx = DBTransaction::new(); for change in transaction.0.into_iter() { match change { Change::Set(col, key, value) => tx.put_vec(col, &key, value), Change::Remove(col, key) => tx.delete(col, &key), - _ => unimplemented!(), + Change::Store(col, key, value) => { + match self.read_counter(col, key.as_ref())? { + (counter_key, Some(mut counter)) => { + counter += 1; + tx.put(col, &counter_key, &counter.to_le_bytes()); + }, + (counter_key, None) => { + let d = 1u32.to_le_bytes(); + tx.put(col, &counter_key, &d); + tx.put_vec(col, key.as_ref(), value); + }, + } + } + Change::Reference(col, key) => { + if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? { + counter += 1; + tx.put(col, &counter_key, &counter.to_le_bytes()); + } + } + Change::Release(col, key) => { + if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? 
{ + counter -= 1; + if counter == 0 { + tx.delete(col, &counter_key); + tx.delete(col, key.as_ref()); + } else { + tx.put(col, &counter_key, &counter.to_le_bytes()); + } + } + } } } self.0.write(tx).map_err(|e| error::DatabaseError(Box::new(e))) @@ -54,7 +109,7 @@ impl Database for DbAdapter { handle_err(self.0.get(col, key)) } - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); + fn contains(&self, col: ColumnId, key: &[u8]) -> bool { + handle_err(self.0.has_key(col, key)) } } diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index 7107ea25c02c0..1fa0c8e49b015 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -32,16 +32,9 @@ pub type ColumnId = u32; pub enum Change { Set(ColumnId, Vec, Vec), Remove(ColumnId, Vec), - Store(H, Vec), - Release(H), -} - -/// An alteration to the database that references the data. -pub enum ChangeRef<'a, H> { - Set(ColumnId, &'a [u8], &'a [u8]), - Remove(ColumnId, &'a [u8]), - Store(H, &'a [u8]), - Release(H), + Store(ColumnId, H, Vec), + Reference(ColumnId, H), + Release(ColumnId, H), } /// A series of changes to the database that can be committed atomically. They do not take effect @@ -67,49 +60,27 @@ impl Transaction { self.0.push(Change::Remove(col, key.to_vec())) } /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent + /// `Database::get`. This may be called multiple times, but subsequent /// calls will ignore `preimage` and simply increase the number of references on `hash`. - pub fn store(&mut self, hash: H, preimage: &[u8]) { - self.0.push(Change::Store(hash, preimage.to_vec())) + pub fn store(&mut self, col: ColumnId, hash: H, preimage: Vec) { + self.0.push(Change::Store(col, hash, preimage)) + } + /// Increase the number of references for `hash` in the database. 
+ pub fn reference(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::Reference(col, hash)) } /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to + /// corresponding `store`s must have been given before it is legal for `Database::get` to /// be unable to provide the preimage. - pub fn release(&mut self, hash: H) { - self.0.push(Change::Release(hash)) + pub fn release(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::Release(col, hash)) } } -pub trait Database: Send + Sync { +pub trait Database>: Send + Sync { /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` /// will reflect the new state. - fn commit(&self, transaction: Transaction) -> error::Result<()> { - for change in transaction.0.into_iter() { - match change { - Change::Set(col, key, value) => self.set(col, &key, &value), - Change::Remove(col, key) => self.remove(col, &key), - Change::Store(hash, preimage) => self.store(&hash, &preimage), - Change::Release(hash) => self.release(&hash), - }?; - } - - Ok(()) - } - - /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` - /// will reflect the new state. - fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) -> error::Result<()> { - let mut tx = Transaction::new(); - for change in transaction { - match change { - ChangeRef::Set(col, key, value) => tx.set(col, key, value), - ChangeRef::Remove(col, key) => tx.remove(col, key), - ChangeRef::Store(hash, preimage) => tx.store(hash, preimage), - ChangeRef::Release(hash) => tx.release(hash), - } - } - self.commit(tx) - } + fn commit(&self, transaction: Transaction) -> error::Result<()>; /// Retrieve the value previously stored against `key` or `None` if /// `key` is not currently in the database. 
@@ -120,6 +91,11 @@ pub trait Database: Send + Sync { self.get(col, key).is_some() } + /// Check value size in the database possibly without retrieving it. + fn value_size(&self, col: ColumnId, key: &[u8]) -> Option { + self.get(col, key).map(|v| v.len()) + } + /// Call `f` with the value previously stored against `key`. /// /// This may be faster than `get` since it doesn't allocate. @@ -127,50 +103,6 @@ pub trait Database: Send + Sync { fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { self.get(col, key).map(|v| f(&v)); } - - /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.set(col, key, value); - self.commit(t) - } - /// Remove the value of `key` in `col`. - fn remove(&self, col: ColumnId, key: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.remove(col, key); - self.commit(t) - } - - /// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is - /// currently stored. - fn lookup(&self, hash: &H) -> Option>; - - /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage - /// is currently stored. - /// - /// This may be faster than `lookup` since it doesn't allocate. - /// Use `with_lookup` helper function if you need `f` to return a value from `f` - fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { - self.lookup(hash).map(|v| f(&v)); - } - - /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent - /// calls will ignore `preimage` and simply increase the number of references on `hash`. 
- fn store(&self, hash: &H, preimage: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.store(hash.clone(), preimage); - self.commit(t) - } - - /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to - /// be unable to provide the preimage. - fn release(&self, hash: &H) -> error::Result<()> { - let mut t = Transaction::new(); - t.release(hash.clone()); - self.commit(t) - } } impl std::fmt::Debug for dyn Database { @@ -183,20 +115,13 @@ impl std::fmt::Debug for dyn Database { /// `key` is not currently in the database. /// /// This may be faster than `get` since it doesn't allocate. -pub fn with_get(db: &dyn Database, col: ColumnId, key: &[u8], mut f: impl FnMut(&[u8]) -> R) -> Option { +pub fn with_get>( + db: &dyn Database, + col: ColumnId, + key: &[u8], mut f: impl FnMut(&[u8]) -> R +) -> Option { let mut result: Option = None; let mut adapter = |k: &_| { result = Some(f(k)); }; db.with_get(col, key, &mut adapter); result } - -/// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage -/// is currently stored. -/// -/// This may be faster than `lookup` since it doesn't allocate. -pub fn with_lookup(db: &dyn Database, hash: &H, mut f: impl FnMut(&[u8]) -> R) -> Option { - let mut result: Option = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; - db.with_lookup(hash, &mut adapter); - result -} diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index 41af2e2f235c0..24ddf03319711 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -17,26 +17,41 @@ //! 
In-memory implementation of `Database` -use std::collections::HashMap; +use std::collections::{HashMap, hash_map::Entry}; use crate::{Database, Change, ColumnId, Transaction, error}; use parking_lot::RwLock; #[derive(Default)] /// This implements `Database` as an in-memory hash map. `commit` is not atomic. -pub struct MemDb - (RwLock<(HashMap, Vec>>, HashMap>)>); +pub struct MemDb(RwLock, (u32, Vec)>>>); -impl Database for MemDb - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash +impl Database for MemDb + where H: Clone + AsRef<[u8]> { fn commit(&self, transaction: Transaction) -> error::Result<()> { let mut s = self.0.write(); for change in transaction.0.into_iter() { match change { - Change::Set(col, key, value) => { s.0.entry(col).or_default().insert(key, value); }, - Change::Remove(col, key) => { s.0.entry(col).or_default().remove(&key); }, - Change::Store(hash, preimage) => { s.1.insert(hash, preimage); }, - Change::Release(hash) => { s.1.remove(&hash); }, + Change::Set(col, key, value) => { s.entry(col).or_default().insert(key, (1, value)); }, + Change::Remove(col, key) => { s.entry(col).or_default().remove(&key); }, + Change::Store(col, hash, value) => { + s.entry(col).or_default().entry(hash.as_ref().to_vec()) + .and_modify(|(c, _)| *c += 1) + .or_insert_with(|| (1, value)); + }, + Change::Reference(col, hash) => { + if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { + entry.get_mut().0 += 1; + } + } + Change::Release(col, hash) => { + if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { + entry.get_mut().0 -= 1; + if entry.get().0 == 0 { + entry.remove(); + } + } + } } } @@ -45,18 +60,11 @@ impl Database for MemDb fn get(&self, col: ColumnId, key: &[u8]) -> Option> { let s = self.0.read(); - s.0.get(&col).and_then(|c| c.get(key).cloned()) - } - - fn lookup(&self, hash: &H) -> Option> { - let s = self.0.read(); - s.1.get(hash).cloned() + 
s.get(&col).and_then(|c| c.get(key).map(|(_, v)| v.clone())) } } -impl MemDb - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash -{ +impl MemDb { /// Create a new instance pub fn new() -> Self { MemDb::default() @@ -65,7 +73,7 @@ impl MemDb /// Count number of values in a column pub fn count(&self, col: ColumnId) -> usize { let s = self.0.read(); - s.0.get(&col).map(|c| c.len()).unwrap_or(0) + s.get(&col).map(|c| c.len()).unwrap_or(0) } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 3ee37f5e31b93..1077f41048d59 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -228,6 +228,16 @@ pub trait Externalities: ExtensionStore { /// no transaction is open that can be closed. fn storage_commit_transaction(&mut self) -> Result<(), ()>; + /// Index specified transaction slice and store it. + fn storage_index_transaction(&mut self, _index: u32, _offset: u32) { + unimplemented!("storage_index_transaction"); + } + + /// Renew existing piece of transaction storage. + fn storage_renew_transaction_index(&mut self, _index: u32, _hash: &[u8], _size: u32) { + unimplemented!("storage_renew_transaction_index"); + } + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index e2fff8e2db010..63432a36efc80 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -19,3 +19,9 @@ syn = { version = "1.0.58", features = ["full", "visit"] } quote = "1.0" proc-macro2 = "1.0.6" proc-macro-crate = "1.0.0" + +[dev-dependencies] +parity-scale-codec = "2.0.1" +sp-arithmetic = { path = "../../arithmetic" } +sp-npos-elections = { path = ".." 
} +trybuild = "1.0.41" diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs index 12f5ca2b41735..2c8edefbfb379 100644 --- a/primitives/npos-elections/compact/src/assignment.rs +++ b/primitives/npos-elections/compact/src/assignment.rs @@ -125,7 +125,7 @@ pub(crate) fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) - .collect::, _npos::Error>>()?; + .collect::, _npos::Error>>()?; if sum >= #per_thing::one() { return Err(_npos::Error::CompactStakeOverflow); diff --git a/primitives/npos-elections/compact/src/codec.rs b/primitives/npos-elections/compact/src/codec.rs index 6e8d4d9277dbd..f75f99682711c 100644 --- a/primitives/npos-elections/compact/src/codec.rs +++ b/primitives/npos-elections/compact/src/codec.rs @@ -49,14 +49,14 @@ fn decode_impl( quote! { let #name = < - Vec<(_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>)> + _npos::sp_std::prelude::Vec<(_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>)> as _npos::codec::Decode >::decode(value)?; let #name = #name .into_iter() .map(|(v, t)| (v.0, t.0)) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); } }; @@ -65,7 +65,7 @@ fn decode_impl( quote! { let #name = < - Vec<( + _npos::sp_std::prelude::Vec<( _npos::codec::Compact<#voter_type>, (_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>), _npos::codec::Compact<#target_type>, @@ -76,7 +76,7 @@ fn decode_impl( let #name = #name .into_iter() .map(|(v, (t1, w), t2)| (v.0, (t1.0, w.0), t2.0)) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); } }; @@ -90,7 +90,7 @@ fn decode_impl( quote! 
{ let #name = < - Vec<( + _npos::sp_std::prelude::Vec<( _npos::codec::Compact<#voter_type>, [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], _npos::codec::Compact<#target_type>, @@ -104,7 +104,7 @@ fn decode_impl( [ #inner_impl ], t_last.0, )) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); } }).collect::(); @@ -142,7 +142,7 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { _npos::codec::Compact(v.clone()), _npos::codec::Compact(t.clone()), )) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); #name.encode_to(&mut r); } }; @@ -160,7 +160,7 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { ), _npos::codec::Compact(t2.clone()), )) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); #name.encode_to(&mut r); } }; @@ -184,14 +184,14 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { [ #inners_compact_array ], _npos::codec::Compact(t_last.clone()), )) - .collect::>(); + .collect::<_npos::sp_std::prelude::Vec<_>>(); #name.encode_to(&mut r); } }).collect::(); quote!( impl _npos::codec::Encode for #ident { - fn encode(&self) -> Vec { + fn encode(&self) -> _npos::sp_std::prelude::Vec { let mut r = vec![]; #encode_impl_single #encode_impl_double diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index ed1837bae18b1..e558ae89ca93e 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -52,8 +52,14 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// For example, the following generates a public struct with name `TestSolution` with `u16` voter /// type, `u8` target type and `Perbill` accuracy with maximum of 8 edges per voter. 
/// -/// ```ignore -/// generate_solution_type!(pub struct TestSolution::(8)) +/// ``` +/// # use sp_npos_elections_compact::generate_solution_type; +/// # use sp_arithmetic::per_things::Perbill; +/// generate_solution_type!(pub struct TestSolution::< +/// VoterIndex = u16, +/// TargetIndex = u8, +/// Accuracy = Perbill, +/// >(8)); /// ``` /// /// The given struct provides function to convert from/to Assignment: @@ -65,11 +71,13 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding /// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works. /// -/// ```ignore +/// ``` +/// # use sp_npos_elections_compact::generate_solution_type; +/// # use sp_arithmetic::per_things::Perbill; /// generate_solution_type!( /// #[compact] -/// pub struct TestSolutionCompact::(8) -/// ) +/// pub struct TestSolutionCompact::(8) +/// ); /// ``` #[proc_macro] pub fn generate_solution_type(item: TokenStream) -> TokenStream { @@ -119,14 +127,14 @@ fn struct_def( let name = field_name_for(1); // NOTE: we use the visibility of the struct for the fields as well.. could be made better. 
quote!( - #vis #name: Vec<(#voter_type, #target_type)>, + #vis #name: _npos::sp_std::prelude::Vec<(#voter_type, #target_type)>, ) }; let doubles = { let name = field_name_for(2); quote!( - #vis #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, + #vis #name: _npos::sp_std::prelude::Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, ) }; @@ -135,7 +143,7 @@ fn struct_def( let field_name = field_name_for(c); let array_len = c - 1; quote!( - #vis #field_name: Vec<( + #vis #field_name: _npos::sp_std::prelude::Vec<( #voter_type, [(#target_type, #weight_type); #array_len], #target_type @@ -194,20 +202,19 @@ fn struct_def( all_edges } - fn unique_targets(&self) -> Vec { + fn unique_targets(&self) -> _npos::sp_std::prelude::Vec { // NOTE: this implementation returns the targets sorted, but we don't use it yet per // se, nor is the API enforcing it. - let mut all_targets: Vec = Vec::with_capacity(self.average_edge_count()); + use _npos::sp_std::collections::btree_set::BTreeSet; + + let mut all_targets: BTreeSet = BTreeSet::new(); let mut maybe_insert_target = |t: Self::Target| { - match all_targets.binary_search(&t) { - Ok(_) => (), - Err(pos) => all_targets.insert(pos, t) - } + all_targets.insert(t); }; #unique_targets_impl - all_targets + all_targets.into_iter().collect() } fn remove_voter(&mut self, to_remove: Self::Voter) -> bool { @@ -216,7 +223,7 @@ fn struct_def( } fn from_assignment( - assignments: Vec<_npos::Assignment>, + assignments: _npos::sp_std::prelude::Vec<_npos::Assignment>, index_of_voter: FV, index_of_target: FT, ) -> Result @@ -243,8 +250,8 @@ fn struct_def( self, voter_at: impl Fn(Self::Voter) -> Option, target_at: impl Fn(Self::Target) -> Option, - ) -> Result>, _npos::Error> { - let mut assignments: Vec<_npos::Assignment> = Default::default(); + ) -> Result<_npos::sp_std::prelude::Vec<_npos::Assignment>, _npos::Error> { + let mut assignments: _npos::sp_std::prelude::Vec<_npos::Assignment> = Default::default(); 
#into_impl Ok(assignments) } @@ -387,7 +394,7 @@ fn check_compact_attr(input: ParseStream) -> Result { } } -/// #[compact] pub struct CompactName::() +/// #[compact] pub struct CompactName::() impl Parse for SolutionDef { fn parse(input: ParseStream) -> syn::Result { // optional #[compact] @@ -406,9 +413,22 @@ impl Parse for SolutionDef { return Err(syn_err("Must provide 3 generic args.")) } - let mut types: Vec = generics.args.iter().map(|t| + let expected_types = ["VoterIndex", "TargetIndex", "Accuracy"]; + + let mut types: Vec = generics.args.iter().zip(expected_types.iter()).map(|(t, expected)| match t { - syn::GenericArgument::Type(ty) => Ok(ty.clone()), + syn::GenericArgument::Type(ty) => { + // this is now an error + Err(syn::Error::new_spanned(ty, format!("Expected binding: `{} = ...`", expected))) + }, + syn::GenericArgument::Binding(syn::Binding{ident, ty, ..}) => { + // check that we have the right keyword for this position in the argument list + if ident == expected { + Ok(ty.clone()) + } else { + Err(syn::Error::new_spanned(ident, format!("Expected `{}`", expected))) + } + } _ => Err(syn_err("Wrong type of generic provided. 
Must be a `type`.")), } ).collect::>()?; @@ -437,3 +457,12 @@ impl Parse for SolutionDef { fn field_name_for(n: usize) -> Ident { Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) } + +#[cfg(test)] +mod tests { + #[test] + fn ui_fail() { + let cases = trybuild::TestCases::new(); + cases.compile_fail("tests/ui/fail/*.rs"); + } +} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs new file mode 100644 index 0000000000000..4bbf4960a9483 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + TargetIndex = u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr new file mode 100644 index 0000000000000..b6bb8f39ede61 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `Accuracy = ...` + --> $DIR/missing_accuracy.rs:6:2 + | +6 | Perbill, + | ^^^^^^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs new file mode 100644 index 0000000000000..7d7584340713c --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr new file mode 100644 index 0000000000000..d0c92c5bbd8e9 --- /dev/null +++ 
b/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `TargetIndex = ...` + --> $DIR/missing_target.rs:5:2 + | +5 | u8, + | ^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs new file mode 100644 index 0000000000000..3ad77dc104ad7 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + TargetIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr new file mode 100644 index 0000000000000..a825d460c2fa8 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/missing_voter.rs:4:2 + | +4 | u16, + | ^^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs new file mode 100644 index 0000000000000..aaebb857b3d8d --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr new file mode 100644 index 0000000000000..28f1c2091546f --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/no_annotations.rs:4:2 + | +4 | u16, + | ^^^ diff --git 
a/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs new file mode 100644 index 0000000000000..37124256b35e4 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + TargetIndex = u16, + VoterIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr new file mode 100644 index 0000000000000..5759fee7472fa --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr @@ -0,0 +1,5 @@ +error: Expected `VoterIndex` + --> $DIR/swap_voter_target.rs:4:2 + | +4 | TargetIndex = u16, + | ^^^^^^^^^^^ diff --git a/primitives/npos-elections/fuzzer/src/compact.rs b/primitives/npos-elections/fuzzer/src/compact.rs index 91f734bb5b7cb..a49f6a535e5f0 100644 --- a/primitives/npos-elections/fuzzer/src/compact.rs +++ b/primitives/npos-elections/fuzzer/src/compact.rs @@ -4,7 +4,11 @@ use sp_npos_elections::sp_arithmetic::Percent; use sp_runtime::codec::{Encode, Error}; fn main() { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); loop { fuzz!(|fuzzer_data: &[u8]| { let result_decoded: Result = diff --git a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs index 9727d1406ad24..49794f21fb256 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs @@ -112,7 +112,7 @@ fn iteration(mut candidate_count: usize, mut voter_count: usize, seed: u64) { let threshold 
= standard_threshold(rounds, voters.iter().map(|voter| voter.budget())); assert!( - pjr_check_core(&candidates, &voters, threshold), + pjr_check_core(&candidates, &voters, threshold).is_ok(), "unbalanced sequential phragmen must satisfy PJR", ); } diff --git a/primitives/npos-elections/src/helpers.rs b/primitives/npos-elections/src/helpers.rs index 10a49a084f102..091efdd36ea5f 100644 --- a/primitives/npos-elections/src/helpers.rs +++ b/primitives/npos-elections/src/helpers.rs @@ -95,15 +95,15 @@ mod tests { Assignment { who: 1u32, distribution: vec![ - (10u32, Perbill::from_fraction(0.5)), - (20, Perbill::from_fraction(0.5)), + (10u32, Perbill::from_float(0.5)), + (20, Perbill::from_float(0.5)), ], }, Assignment { who: 2u32, distribution: vec![ - (10, Perbill::from_fraction(0.33)), - (20, Perbill::from_fraction(0.67)), + (10, Perbill::from_float(0.33)), + (20, Perbill::from_float(0.67)), ], }, ]; diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index c87085ef9ff8f..05505d06f201e 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -119,6 +119,8 @@ pub use pjr::*; pub use codec; #[doc(hidden)] pub use sp_arithmetic; +#[doc(hidden)] +pub use sp_std; /// Simple Extension trait to easily convert `None` from index closures to `Err`. /// @@ -142,10 +144,22 @@ pub trait CompactSolution: Sized { const LIMIT: usize; /// The voter type. Needs to be an index (convert to usize). - type Voter: UniqueSaturatedInto + TryInto + TryFrom + Debug + Copy + Clone; + type Voter: UniqueSaturatedInto + + TryInto + + TryFrom + + Debug + + Copy + + Clone + + Bounded; /// The target type. Needs to be an index (convert to usize). - type Target: UniqueSaturatedInto + TryInto + TryFrom + Debug + Copy + Clone; + type Target: UniqueSaturatedInto + + TryInto + + TryFrom + + Debug + + Copy + + Clone + + Bounded; /// The weight/accuracy type of each vote. 
type Accuracy: PerThing128; @@ -359,7 +373,7 @@ impl Voter { .edges .into_iter() .filter_map(|e| { - let per_thing = P::from_rational_approximation(e.weight, budget); + let per_thing = P::from_rational(e.weight, budget); // trim zero edges. if per_thing.is_zero() { None } else { Some((e.who, per_thing)) } }).collect::>(); @@ -539,7 +553,7 @@ impl StakedAssignment { let distribution = self.distribution .into_iter() .filter_map(|(target, w)| { - let per_thing = P::from_rational_approximation(w, stake); + let per_thing = P::from_rational(w, stake); if per_thing == Bounded::min_value() { None } else { diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index ea8f3780e0e6a..14e4139c5d324 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -345,7 +345,7 @@ pub(crate) fn run_and_compare( for (candidate, per_thingy) in distribution { if let Some(float_assignment) = float_assignments.1.iter().find(|x| x.0 == candidate ) { assert_eq_error_rate!( - Output::from_fraction(float_assignment.1).deconstruct(), + Output::from_float(float_assignment.1).deconstruct(), per_thingy.deconstruct(), Output::Inner::one(), ); diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index ad93d2f18ef9a..644535d4c41c2 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -101,7 +101,7 @@ pub(crate) fn calculate_max_score( for edge in voter.edges.iter() { let edge_candidate = edge.candidate.borrow(); if edge_candidate.elected { - let edge_contribution: ExtendedBalance = P::from_rational_approximation( + let edge_contribution: ExtendedBalance = P::from_rational( edge.weight, edge_candidate.backed_stake, ).deconstruct().into(); diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index 61e0b2deb79fc..290110b14e650 100644 --- a/primitives/npos-elections/src/pjr.rs +++ 
b/primitives/npos-elections/src/pjr.rs @@ -37,7 +37,6 @@ use crate::{ use sp_std::{rc::Rc, vec::Vec}; use sp_std::collections::btree_map::BTreeMap; use sp_arithmetic::{traits::Zero, Perbill}; - /// The type used as the threshold. /// /// Just some reading sugar; Must always be same as [`ExtendedBalance`]; @@ -74,7 +73,7 @@ pub fn pjr_check( supports: &Supports, all_candidates: Vec, all_voters: Vec<(AccountId, VoteWeight, Vec)>, -) -> bool { +) -> Result<(), AccountId> { let t = standard_threshold(supports.len(), all_voters.iter().map(|voter| voter.1 as ExtendedBalance)); t_pjr_check(supports, all_candidates, all_voters, t) } @@ -119,7 +118,7 @@ pub fn t_pjr_check( all_candidates: Vec, all_voters: Vec<(AccountId, VoteWeight, Vec)>, t: Threshold, -) -> bool { +) -> Result<(), AccountId> { // First order of business: derive `(candidates, voters)` from `supports`. let (candidates, voters) = prepare_pjr_input( supports, @@ -133,18 +132,99 @@ pub fn t_pjr_check( /// The internal implementation of the PJR check after having the data converted. /// /// [`pjr_check`] or [`t_pjr_check`] are typically easier to work with. +/// +/// This function returns an `AccountId` in the `Err` case. This is the counter_example: the ID of the +/// unelected candidate with the highest prescore, such that `pre_score(counter_example) >= t`. pub fn pjr_check_core( candidates: &[CandidatePtr], voters: &[Voter], t: Threshold, -) -> bool { +) -> Result<(), AccountId> { let unelected = candidates.iter().filter(|c| !c.borrow().elected); let maybe_max_pre_score = unelected.map(|c| (pre_score(Rc::clone(c), voters, t), c.borrow().who.clone())).max(); // if unelected is empty then the solution is indeed PJR. - maybe_max_pre_score.map_or(true, |(max_pre_score, _)| max_pre_score < t) + match maybe_max_pre_score { + Some((max_pre_score, counter_example)) if max_pre_score >= t => Err(counter_example), + _ => Ok(()), + } } +/// Validate a challenge to an election result. 
+/// +/// A challenge to an election result is valid if there exists some counter_example for which +/// `pre_score(counter_example) >= threshold`. Validating an existing counter_example is computationally +/// cheaper than re-running the PJR check. +/// +/// This function uses the standard threshold. +/// +/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR. +/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR. +pub fn validate_pjr_challenge( + counter_example: AccountId, + supports: &Supports, + all_candidates: Vec, + all_voters: Vec<(AccountId, VoteWeight, Vec)>, +) -> bool { + let threshold = standard_threshold(supports.len(), all_voters.iter().map(|voter| voter.1 as ExtendedBalance)); + validate_t_pjr_challenge(counter_example, supports, all_candidates, all_voters, threshold) +} + +/// Validate a challenge to an election result. +/// +/// A challenge to an election result is valid if there exists some counter_example for which +/// `pre_score(counter_example) >= threshold`. Validating an existing counter_example is computationally +/// cheaper than re-running the PJR check. +/// +/// This function uses a supplied threshold. +/// +/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR. +/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR. +pub fn validate_t_pjr_challenge( + counter_example: AccountId, + supports: &Supports, + all_candidates: Vec, + all_voters: Vec<(AccountId, VoteWeight, Vec)>, + threshold: Threshold, +) -> bool { + let (candidates, voters) = prepare_pjr_input( + supports, + all_candidates, + all_voters, + ); + validate_pjr_challenge_core(counter_example, &candidates, &voters, threshold) +} +/// Validate a challenge to an election result. +/// +/// A challenge to an election result is valid if there exists some counter_example for which +/// `pre_score(counter_example) >= threshold`. 
Validating an existing counter_example is computationally +/// cheaper than re-running the PJR check. +/// +/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR. +/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR. +fn validate_pjr_challenge_core( + counter_example: AccountId, + candidates: &[CandidatePtr], + voters: &[Voter], + threshold: Threshold, +) -> bool { + // Performing a linear search of the candidate list is not great, for obvious reasons. However, + // the alternatives are worse: + // + // - we could pre-sort the candidates list in `prepare_pjr_input` (n log n) which would let us + // binary search for the appropriate one here (log n). Overall runtime is `n log n` which is + // worse than the current runtime of `n`. + // + // - we could probably pre-sort the candidates list in `n` in `prepare_pjr_input` using some + // unsafe code leveraging the existing `candidates_index`: allocate an uninitialized vector of + // appropriate length, then copy in all the elements. We'd really prefer to avoid unsafe code + // in the runtime, though. + let candidate = match candidates.iter().find(|candidate| candidate.borrow().who == counter_example) { + None => return false, + Some(candidate) => candidate.clone(), + }; + pre_score(candidate, &voters, threshold) >= threshold +} /// Convert the data types that the user runtime has into ones that can be used by this module. 
/// @@ -283,7 +363,7 @@ fn slack(voter: &Voter, t: Threshold) -> Exte let candidate = edge.candidate.borrow(); if candidate.elected { let extra = - Perbill::one().min(Perbill::from_rational_approximation(t, candidate.backed_stake)) + Perbill::one().min(Perbill::from_rational(t, candidate.backed_stake)) * edge.weight; acc.saturating_add(extra) } else { @@ -315,6 +395,15 @@ mod tests { voter } + fn assert_core_failure( + candidates: &[CandidatePtr], + voters: &[Voter], + t: Threshold, + ) { + let counter_example = pjr_check_core(candidates, voters, t).unwrap_err(); + assert!(validate_pjr_challenge_core(counter_example, candidates, voters, t)); + } + #[test] fn slack_works() { let voter = setup_voter(10, vec![(1, 10, true), (2, 20, true)]); @@ -388,9 +477,9 @@ mod tests { // fyi. this is not PJR, obviously because the votes of 3 can bump the stake a lot but they // are being ignored. - assert!(!pjr_check_core(&candidates, &voters, 1)); - assert!(!pjr_check_core(&candidates, &voters, 10)); - assert!(!pjr_check_core(&candidates, &voters, 20)); + assert_core_failure(&candidates, &voters, 1); + assert_core_failure(&candidates, &voters, 10); + assert_core_failure(&candidates, &voters, 20); } // These next tests ensure that the threshold phase change property holds for us, but that's not their real purpose. 
@@ -476,7 +565,7 @@ mod tests { let mut prev_threshold = 0; // find the binary range containing the threshold beyond which the PJR check succeeds - while !pjr_check_core(&candidates, &voters, threshold) { + while pjr_check_core(&candidates, &voters, threshold).is_err() { prev_threshold = threshold; threshold = threshold.checked_mul(2).expect("pjr check must fail before we run out of capacity in u128"); } @@ -488,7 +577,7 @@ mod tests { while high_bound - low_bound > 1 { // maintain the invariant that low_bound fails and high_bound passes let test = low_bound + ((high_bound - low_bound) / 2); - if pjr_check_core(&candidates, &voters, test) { + if pjr_check_core(&candidates, &voters, test).is_ok() { high_bound = test; } else { low_bound = test; @@ -502,12 +591,12 @@ mod tests { let mut unexpected_failures = Vec::new(); let mut unexpected_successes = Vec::new(); for t in 0..=low_bound { - if pjr_check_core(&candidates, &voters, t) { + if pjr_check_core(&candidates, &voters, t).is_ok() { unexpected_successes.push(t); } } for t in high_bound..(high_bound*2) { - if !pjr_check_core(&candidates, &voters, t) { + if pjr_check_core(&candidates, &voters, t).is_err() { unexpected_failures.push(t); } } diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index edfea038ebc50..6304e50ec5868 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -1095,7 +1095,7 @@ mod score { is_score_better( claim.clone(), initial.clone(), - Perbill::from_rational_approximation(1u32, 10_000), + Perbill::from_rational(1u32, 10_000), ), true, ); @@ -1104,7 +1104,7 @@ mod score { is_score_better( claim.clone(), initial.clone(), - Perbill::from_rational_approximation(2u32, 10_000), + Perbill::from_rational(2u32, 10_000), ), true, ); @@ -1113,7 +1113,7 @@ mod score { is_score_better( claim.clone(), initial.clone(), - Perbill::from_rational_approximation(3u32, 10_000), + Perbill::from_rational(3u32, 10_000), ), true, ); 
@@ -1122,7 +1122,7 @@ mod score { is_score_better( claim.clone(), initial.clone(), - Perbill::from_rational_approximation(4u32, 10_000), + Perbill::from_rational(4u32, 10_000), ), true, ); @@ -1131,7 +1131,7 @@ mod score { is_score_better( claim.clone(), initial.clone(), - Perbill::from_rational_approximation(5u32, 10_000), + Perbill::from_rational(5u32, 10_000), ), false, ); @@ -1148,7 +1148,11 @@ mod solution_type { type TestAccuracy = Percent; - generate_solution_type!(pub struct TestSolutionCompact::(16)); + generate_solution_type!(pub struct TestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u8, + Accuracy = TestAccuracy, + >(16)); #[allow(dead_code)] mod __private { @@ -1158,7 +1162,7 @@ mod solution_type { use sp_arithmetic::Percent; generate_solution_type!( #[compact] - struct InnerTestSolutionCompact::(12) + struct InnerTestSolutionCompact::(12) ); } @@ -1166,7 +1170,11 @@ mod solution_type { fn solution_struct_works_with_and_without_compact() { // we use u32 size to make sure compact is smaller. 
let without_compact = { - generate_solution_type!(pub struct InnerTestSolution::(16)); + generate_solution_type!(pub struct InnerTestSolution::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); let compact = InnerTestSolution { votes1: vec![(2, 20), (4, 40)], votes2: vec![ @@ -1180,7 +1188,11 @@ mod solution_type { }; let with_compact = { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); let compact = InnerTestSolutionCompact { votes1: vec![(2, 20), (4, 40)], votes2: vec![ diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 7b2a10297f9c6..1b30d43ccaca7 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -30,7 +30,7 @@ use crate::traits::{ self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeMallocSizeOf, NumberFor, }; -use crate::Justification; +use crate::Justifications; /// Something to identify a block. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -112,5 +112,5 @@ pub struct SignedBlock { /// Full block. pub block: Block, /// Block justification. - pub justification: Option, + pub justifications: Option, } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index c8b93a083be4e..090c9781eb13d 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -96,7 +96,65 @@ pub use either::Either; /// the block itself would allow swapping justifications to change the block's hash /// (and thus fork the chain). Sending a `Justification` alongside a block instead /// bypasses this problem. 
-pub type Justification = Vec; +/// +/// Each justification is provided as an encoded blob, and is tagged with an ID +/// to identify the consensus engine that generated the proof (we might have +/// multiple justifications from different engines for the same block). +pub type Justification = (ConsensusEngineId, EncodedJustification); + +/// The encoded justification specific to a consensus engine. +pub type EncodedJustification = Vec; + +/// Collection of justifications for a given block, multiple justifications may +/// be provided by different consensus engines for the same block. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +pub struct Justifications(Vec); + +impl Justifications { + /// Return an iterator over the justifications. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Append a justification. Returns false if a justification with the same + /// `ConsensusEngineId` already exists, in which case the justification is + /// not inserted. + pub fn append(&mut self, justification: Justification) -> bool { + if self.get(justification.0).is_some() { + return false; + } + self.0.push(justification); + true + } + + /// Return the encoded justification for the given consensus engine, if it + /// exists. + pub fn get(&self, engine_id: ConsensusEngineId) -> Option<&EncodedJustification> { + self.iter().find(|j| j.0 == engine_id).map(|j| &j.1) + } + + /// Return a copy of the encoded justification for the given consensus + /// engine, if it exists. 
+ pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option { + self.into_iter().find(|j| j.0 == engine_id).map(|j| j.1) + } +} + +impl IntoIterator for Justifications { + type Item = Justification; + type IntoIter = sp_std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl From for Justifications { + fn from(justification: Justification) -> Self { + Self(vec![justification]) + } +} use traits::{Verify, Lazy}; @@ -392,7 +450,7 @@ pub type DispatchResult = sp_std::result::Result<(), DispatchError>; pub type DispatchResultWithInfo = sp_std::result::Result>; /// Reason why a dispatch call failed. -#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] +#[derive(Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum DispatchError { /// Some error occurred. @@ -416,6 +474,8 @@ pub enum DispatchError { ConsumerRemaining, /// There are no providers so the account cannot be created. NoProviders, + /// An error to do with tokens. + Token(TokenError), } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -474,6 +534,49 @@ impl From for DispatchError { } } +/// Description of what went wrong when trying to complete an operation on a token. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum TokenError { + /// Funds are unavailable. + NoFunds, + /// Account that must exist would die. + WouldDie, + /// Account cannot exist with the funds that would be given. + BelowMinimum, + /// Account cannot be created. + CannotCreate, + /// The asset in question is unknown. + UnknownAsset, + /// Funds exist but are frozen. + Frozen, + /// An underflow would occur. + Underflow, + /// An overflow would occur. 
+ Overflow, +} + +impl From for &'static str { + fn from(e: TokenError) -> &'static str { + match e { + TokenError::NoFunds => "Funds are unavailable", + TokenError::WouldDie => "Account that must exist would die", + TokenError::BelowMinimum => "Account cannot exist with the funds that would be given", + TokenError::CannotCreate => "Account cannot be created", + TokenError::UnknownAsset => "The asset in question is unknown", + TokenError::Frozen => "Funds exist but are frozen", + TokenError::Underflow => "An underflow would occur", + TokenError::Overflow => "An overflow would occur", + } + } +} + +impl From for DispatchError { + fn from(e: TokenError) -> DispatchError { + DispatchError::Token(e) + } +} + impl From<&'static str> for DispatchError { fn from(err: &'static str) -> DispatchError { DispatchError::Other(err) @@ -489,6 +592,7 @@ impl From for &'static str { DispatchError::Module { message, .. } => message.unwrap_or("Unknown module error"), DispatchError::ConsumerRemaining => "Consumer remaining", DispatchError::NoProviders => "No providers", + DispatchError::Token(e) => e.into(), } } } @@ -517,6 +621,10 @@ impl traits::Printable for DispatchError { } Self::ConsumerRemaining => "Consumer remaining".print(), Self::NoProviders => "No providers".print(), + Self::Token(e) => { + "Token error: ".print(); + <&'static str>::from(*e).print(); + } } } } @@ -531,6 +639,29 @@ impl traits::Printable for DispatchErrorWithPostInfo where } } +impl PartialEq for DispatchError { + fn eq(&self, other: &Self) -> bool { + use DispatchError::*; + + match (self, other) { + (CannotLookup, CannotLookup) | + (BadOrigin, BadOrigin) | + (ConsumerRemaining, ConsumerRemaining) | + (NoProviders, NoProviders) => true, + + (Token(l), Token(r)) => l == r, + (Other(l), Other(r)) => l == r, + + ( + Module { index: index_l, error: error_l, .. }, + Module { index: index_r, error: error_r, .. 
}, + ) => (index_l == index_r) && (error_l == error_r), + + _ => false, + } + } +} + /// This type specifies the outcome of dispatching a call to a module. /// /// In case of failure an error specific to the module is returned. @@ -768,6 +899,38 @@ mod tests { ); } + #[test] + fn dispatch_error_equality() { + use DispatchError::*; + + let variants = vec![ + Other("foo"), + Other("bar"), + CannotLookup, + BadOrigin, + Module { index: 1, error: 1, message: None }, + Module { index: 1, error: 2, message: None }, + Module { index: 2, error: 1, message: None }, + ConsumerRemaining, + NoProviders, + ]; + for (i, variant) in variants.iter().enumerate() { + for (j, other_variant) in variants.iter().enumerate() { + if i == j { + assert_eq!(variant, other_variant); + } else { + assert_ne!(variant, other_variant); + } + } + } + + // Ignores `message` field in `Module` variant. + assert_eq!( + Module { index: 1, error: 1, message: Some("foo") }, + Module { index: 1, error: 1, message: None}, + ); + } + #[test] fn multi_signature_ecdsa_verify_works() { let msg = &b"test-message"[..]; diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 4c66db6c385c0..1529de4ab591a 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -439,11 +439,11 @@ pub trait BlockNumberProvider { /// /// In case of using crate `sp_runtime` without the crate `frame` /// system, it is already implemented for - /// `frame_system::Module` as: + /// `frame_system::Pallet` as: /// /// ```ignore /// fn current_block_number() -> Self { - /// frame_system::Module::block_number() + /// frame_system::Pallet::block_number() /// } /// ``` /// . 
diff --git a/primitives/sr-api/proc-macro/src/lib.rs b/primitives/sr-api/proc-macro/src/lib.rs deleted file mode 100644 index 4c4aa0d7cb929..0000000000000 --- a/primitives/sr-api/proc-macro/src/lib.rs +++ /dev/null @@ -1,190 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Macros for declaring and implementing runtime apis. - -#![recursion_limit = "512"] - -use proc_macro::TokenStream; - -mod impl_runtime_apis; -mod decl_runtime_apis; -mod utils; - -/// Tags given trait implementations as runtime apis. -/// -/// All traits given to this macro, need to be declared with the `decl_runtime_apis!` macro. -/// The implementation of the trait should follow the declaration given to the `decl_runtime_apis!` -/// macro, besides the `Block` type that is required as first generic parameter for each runtime -/// api trait. When implementing a runtime api trait, it is required that the trait is referenced -/// by a path, e.g. `impl my_trait::MyTrait for Runtime`. The macro will use this path to access -/// the declaration of the trait for the runtime side. -/// -/// The macro also generates the api implementations for the client side and provides it through -/// the `RuntimeApi` type. The `RuntimeApi` is hidden behind a `feature` called `std`. 
-/// -/// To expose version information about all implemented api traits, the constant -/// `RUNTIME_API_VERSIONS` is generated. This constant should be used to instantiate the `apis` -/// field of `RuntimeVersion`. -/// -/// # Example -/// -/// ```rust -/// use sp_version::create_runtime_str; -/// # -/// # use sp_runtime::traits::{GetNodeBlockType, Block as BlockT}; -/// # use test_client::runtime::Block; -/// # -/// # /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// # /// trait are done by the `construct_runtime!` macro in a real runtime. -/// # pub struct Runtime {} -/// # impl GetNodeBlockType for Runtime { -/// # type NodeBlock = Block; -/// # } -/// # -/// # sp_api::decl_runtime_apis! { -/// # /// Declare the api trait. -/// # pub trait Balance { -/// # /// Get the balance. -/// # fn get_balance() -> u64; -/// # /// Set the balance. -/// # fn set_balance(val: u64); -/// # } -/// # pub trait BlockBuilder { -/// # fn build_block() -> Block; -/// # } -/// # } -/// -/// /// All runtime api implementations need to be done in one call of the macro! -/// sp_api::impl_runtime_apis! { -/// # impl sp_api::Core for Runtime { -/// # fn version() -> sp_version::RuntimeVersion { -/// # unimplemented!() -/// # } -/// # fn execute_block(_block: Block) {} -/// # fn initialize_block(_header: &::Header) {} -/// # } -/// -/// impl self::Balance for Runtime { -/// fn get_balance() -> u64 { -/// 1 -/// } -/// fn set_balance(_bal: u64) { -/// // Store the balance -/// } -/// } -/// -/// impl self::BlockBuilder for Runtime { -/// fn build_block() -> Block { -/// unimplemented!("Please implement me!") -/// } -/// } -/// } -/// -/// /// Runtime version. This needs to be declared for each runtime. 
-/// pub const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion { -/// spec_name: create_runtime_str!("node"), -/// impl_name: create_runtime_str!("test-node"), -/// authoring_version: 1, -/// spec_version: 1, -/// impl_version: 0, -/// // Here we are exposing the runtime api versions. -/// apis: RUNTIME_API_VERSIONS, -/// transaction_version: 1, -/// }; -/// -/// # fn main() {} -/// ``` -#[proc_macro] -pub fn impl_runtime_apis(input: TokenStream) -> TokenStream { - impl_runtime_apis::impl_runtime_apis_impl(input) -} - -/// Declares given traits as runtime apis. -/// -/// The macro will create two declarations, one for using on the client side and one for using -/// on the runtime side. The declaration for the runtime side is hidden in its own module. -/// The client side declaration gets two extra parameters per function, -/// `&self` and `at: &BlockId`. The runtime side declaration will match the given trait -/// declaration. Besides one exception, the macro adds an extra generic parameter `Block: BlockT` -/// to the client side and the runtime side. This generic parameter is usable by the user. -/// -/// For implementing these macros you should use the `impl_runtime_apis!` macro. -/// -/// # Example -/// -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set the balance. -/// fn set_balance(val: u64); -/// } -/// -/// /// You can declare multiple api traits in one macro call. -/// /// In one module you can call the macro at maximum one time. -/// pub trait BlockBuilder { -/// /// The macro adds an explicit `Block: BlockT` generic parameter for you. -/// /// You can use this generic parameter as you would defined it manually. 
-/// fn build_block() -> Block; -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// -/// # Runtime api trait versioning -/// -/// To support versioning of the traits, the macro supports the attribute `#[api_version(1)]`. -/// The attribute supports any `u32` as version. By default, each trait is at version `1`, if no -/// version is provided. We also support changing the signature of a method. This signature -/// change is highlighted with the `#[changed_in(2)]` attribute above a method. A method that is -/// tagged with this attribute is callable by the name `METHOD_before_version_VERSION`. This -/// method will only support calling into wasm, trying to call into native will fail (change the -/// spec version!). Such a method also does not need to be implemented in the runtime. -/// -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// #[api_version(2)] -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set balance. -/// fn set_balance(val: u64); -/// /// Set balance, old version. -/// /// -/// /// Is callable by `set_balance_before_version_2`. -/// #[changed_in(2)] -/// fn set_balance(val: u16); -/// /// In version 2, we added this new function. -/// fn increase_balance(val: u64); -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// -/// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the -/// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` to -/// check if the runtime at the given block id implements the requested runtime api trait. -#[proc_macro] -pub fn decl_runtime_apis(input: TokenStream) -> TokenStream { - decl_runtime_apis::decl_runtime_apis_impl(input) -} diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 7907cda6fb4e7..65b7b638a9a2e 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -18,7 +18,7 @@ //! 
Concrete externalities implementation. use crate::{ - StorageKey, StorageValue, OverlayedChanges, + StorageKey, StorageValue, OverlayedChanges, IndexOperation, backend::Backend, overlayed_changes::OverlayedExtensions, }; use hash_db::Hasher; @@ -568,6 +568,36 @@ where } } + fn storage_index_transaction(&mut self, index: u32, offset: u32) { + trace!( + target: "state", + "{:04x}: IndexTransaction ({}): [{}..]", + self.id, + index, + offset, + ); + self.overlay.add_transaction_index(IndexOperation::Insert { + extrinsic: index, + offset, + }); + } + + /// Renew existing piece of data storage. + fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8], size: u32) { + trace!( + target: "state", + "{:04x}: RenewTransactionIndex ({}) {} bytes", + self.id, + HexDisplay::from(&hash), + size, + ); + self.overlay.add_transaction_index(IndexOperation::Renew { + extrinsic: index, + hash: hash.to_vec(), + size + }); + } + #[cfg(not(feature = "std"))] fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result>, ()> { Ok(None) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 0167633d48070..0a664840df850 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -121,6 +121,7 @@ pub use crate::overlayed_changes::{ StorageChanges, StorageTransactionCache, OffchainChangesCollection, OffchainOverlayedChanges, + IndexOperation, }; pub use crate::backend::Backend; pub use crate::trie_backend_essence::{TrieBackendStorage, Storage}; diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 285bf2a73a148..1d3cbb59ba0c1 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -103,12 +103,35 @@ pub struct OverlayedChanges { children: Map, /// Offchain related changes. 
 offchain: OffchainOverlayedChanges, + /// Transaction index changes, + transaction_index_ops: Vec, /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// Collect statistic on this execution. stats: StateMachineStats, } + +/// Transaction index operation. +#[derive(Debug, Clone)] +pub enum IndexOperation { + /// Insert transaction into index. + Insert { + /// Extrinsic index in the current block. + extrinsic: u32, + /// Data offset in the extrinsic. + offset: u32, + }, + /// Renew existing transaction storage. + Renew { + /// Extrinsic index in the current block. + extrinsic: u32, + /// Referenced index hash. + hash: Vec, + /// Expected data size. + size: u32, + } +} + /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. /// /// This contains all the changes to the storage and transactions to apply theses changes to the @@ -137,6 +160,10 @@ pub struct StorageChanges { /// Phantom data for block number until change trie support no_std. 
#[cfg(not(feature = "std"))] pub _ph: sp_std::marker::PhantomData, + + /// Changes to the transaction index, + #[cfg(feature = "std")] + pub transaction_index_changes: Vec, } #[cfg(feature = "std")] @@ -149,6 +176,7 @@ impl StorageChanges { Transaction, H::Out, Option>, + Vec, ) { ( self.main_storage_changes, @@ -157,6 +185,7 @@ impl StorageChanges { self.transaction, self.transaction_storage_root, self.changes_trie_transaction, + self.transaction_index_changes, ) } } @@ -214,6 +243,8 @@ impl Default for StorageChanges changes_trie_transaction: None, #[cfg(not(feature = "std"))] _ph: Default::default(), + #[cfg(feature = "std")] + transaction_index_changes: Default::default(), } } } @@ -543,6 +574,9 @@ impl OverlayedChanges { let (main_storage_changes, child_storage_changes) = self.drain_committed(); let offchain_storage_changes = self.offchain_drain_committed().collect(); + #[cfg(feature = "std")] + let transaction_index_changes = std::mem::take(&mut self.transaction_index_ops); + Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), @@ -551,6 +585,8 @@ impl OverlayedChanges { transaction_storage_root, #[cfg(feature = "std")] changes_trie_transaction, + #[cfg(feature = "std")] + transaction_index_changes, #[cfg(not(feature = "std"))] _ph: Default::default(), }) @@ -666,6 +702,11 @@ impl OverlayedChanges { None => self.offchain.remove(STORAGE_PREFIX, key), } } + + /// Add transaction index operation. 
+ pub fn add_transaction_index(&mut self, op: IndexOperation) { + self.transaction_index_ops.push(op) + } } #[cfg(feature = "std")] diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index e6ef62b5c59c8..846ba67aec739 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -23,6 +23,7 @@ use codec::{Encode, Decode}; #[cfg(feature = "std")] use sp_inherents::ProvideInherentData; use sp_inherents::{InherentIdentifier, IsFatalError, InherentData}; +use sp_std::time::Duration; use sp_runtime::RuntimeString; @@ -43,6 +44,11 @@ impl Timestamp { pub const fn new(inner: u64) -> Self { Self(inner) } + + /// Returns `self` as [`Duration`]. + pub fn as_duration(&self) -> Duration { + Duration::from_millis(self.0) + } } impl sp_std::ops::Deref for Timestamp { @@ -100,8 +106,8 @@ impl From for u64 { } } -impl From for Timestamp { - fn from(duration: sp_std::time::Duration) -> Self { +impl From for Timestamp { + fn from(duration: Duration) -> Self { Timestamp(duration.as_millis() as u64) } } diff --git a/ss58-registry.json b/ss58-registry.json index 23ea3f8b6ed13..97b3b064e96df 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -60,9 +60,9 @@ "network": "plasm", "displayName": "Plasm Network", "symbols": ["PLM"], - "decimals": null, + "decimals": [15], "standardAccount": "*25519", - "website": null + "website": "https://plasmnet.io" }, { "prefix": 6, @@ -477,6 +477,15 @@ "decimals": [12], "standardAccount": "*25519", "website": "https://crust.network" + }, + { + "prefix": 252, + "network": "social-network", + "displayName": "Social Network", + "symbols": ["NET"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://social.network" } ] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index df1cca2101ad7..925a69e41bb48 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -33,3 +33,4 @@ sp-keystore = { version = "0.9.0", path = 
"../../primitives/keystore" } sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +async-trait = "0.1.42" diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index db3e42f7e01c2..edba96d760fc2 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -24,7 +24,7 @@ use sp_consensus::{ BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, }; -use sp_runtime::Justification; +use sp_runtime::{Justification, Justifications}; use sp_runtime::traits::{Block as BlockT}; use sp_runtime::generic::BlockId; use codec::alloc::collections::hash_map::HashMap; @@ -43,23 +43,24 @@ pub trait ClientExt: Sized { } /// Extension trait for a test client around block importing. +#[async_trait::async_trait] pub trait ClientBlockImportExt: Sized { /// Import block to the chain. No finality. - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and make it our best block if possible. - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and finalize it. - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) + async fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; - /// Import block with justification, finalizes block. - fn import_justified( + /// Import block with justification(s), finalizes block. 
+ async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification + justifications: Justifications, ) -> Result<(), ConsensusError>; } @@ -84,99 +85,132 @@ impl ClientExt for Client } /// This implementation is required, because of the weird api requirements around `BlockImport`. +#[async_trait::async_trait] impl ClientBlockImportExt for std::sync::Arc - where for<'r> &'r T: BlockImport + where + for<'r> &'r T: BlockImport, + Transaction: Send + 'static, + T: Send + Sync, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; 
import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification, + justifications: Justifications, ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); + import.justifications = Some(justifications); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } } +#[async_trait::async_trait] impl ClientBlockImportExt for Client where Self: BlockImport, + RA: Send, + B: Send + Sync, + E: Send, + >::Transaction: Send, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - 
BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification, + justifications: Justifications, ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); + import.justifications = Some(justifications); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 89da7929e64b8..96b7efff83380 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -53,6 +53,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } sc-block-builder = { version = "0.9.0", path = "../../client/block-builder" } sc-executor = { version = "0.9.0", path = "../../client/executor" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } +futures = "0.3.9" [build-dependencies] substrate-wasm-builder = { version = "4.0.0", path = 
"../../utils/wasm-builder" } diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index 5a8083065ec0d..32d94dd618a7d 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -32,6 +32,7 @@ use substrate_test_runtime::{self, Transfer}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, HashFor}; use sc_block_builder::BlockBuilderProvider; +use futures::executor::block_on; /// helper to test the `leaves` implementation for various backends pub fn test_leaves_for_backend(backend: Arc) where @@ -57,7 +58,7 @@ pub fn test_leaves_for_backend(backend: Arc) where // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a1.hash()], @@ -69,7 +70,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); #[allow(deprecated)] assert_eq!( @@ -83,7 +84,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), @@ -96,7 +97,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a4.hash()], @@ -109,7 +110,7 @@ pub fn test_leaves_for_backend(backend: Arc) where false, ).unwrap().build().unwrap().block; - 
client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash()], @@ -130,7 +131,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()], @@ -143,7 +144,7 @@ pub fn test_leaves_for_backend(backend: Arc) where false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()], @@ -155,7 +156,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()], @@ -175,7 +176,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()], @@ -195,7 +196,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], @@ -220,7 +221,7 @@ pub fn test_children_for_backend(backend: Arc) where // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - 
client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -228,7 +229,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let a3 = client.new_block_at( @@ -236,7 +237,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -244,7 +245,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -252,7 +253,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -268,7 +269,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = client.new_block_at( @@ -276,7 +277,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -284,7 +285,7 @@ pub fn 
test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4).unwrap(); + block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -300,7 +301,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut builder = client.new_block_at( @@ -316,7 +317,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -349,7 +350,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -357,7 +358,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A3 let a3 = client.new_block_at( @@ -365,7 +366,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A4 let a4 = client.new_block_at( @@ -373,7 +374,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A5 let a5 = client.new_block_at( @@ -381,7 +382,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B2 let mut builder = client.new_block_at( @@ -397,7 +398,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B3 let b3 = client.new_block_at( @@ -405,7 +406,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B4 let b4 = client.new_block_at( @@ -413,7 +414,7 @@ pub fn 
test_blockchain_query_by_number_gets_canonical(backend: Arc C3 let mut builder = client.new_block_at( @@ -429,7 +430,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc D2 let mut builder = client.new_block_at( @@ -445,7 +446,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc> for Event { impl frame_support::traits::PalletInfo for Runtime { fn index() -> Option { let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some(0) } - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some(1) } - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some(2) } @@ -465,13 +465,13 @@ impl frame_support::traits::PalletInfo for Runtime { } fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some("System") } - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some("Timestamp") } - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some("Babe") } @@ -762,7 +762,10 @@ cfg_if! { } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(1000) + } + fn authorities() -> Vec { system::authorities().into_iter().map(|a| { let authority: sr25519::Public = a.into(); @@ -779,21 +782,21 @@ cfg_if! { c: (3, 10), genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), + randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, } } fn current_epoch_start() -> Slot { - >::current_epoch_start() + >::current_epoch_start() } fn current_epoch() -> sp_consensus_babe::Epoch { - >::current_epoch() + >::current_epoch() } fn next_epoch() -> sp_consensus_babe::Epoch { - >::next_epoch() + >::next_epoch() } fn submit_report_equivocation_unsigned_extrinsic( @@ -1020,7 +1023,10 @@ cfg_if! { } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(1000) + } + fn authorities() -> Vec { system::authorities().into_iter().map(|a| { let authority: sr25519::Public = a.into(); @@ -1037,21 +1043,21 @@ cfg_if! 
{ c: (3, 10), genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), + randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, } } fn current_epoch_start() -> Slot { - >::current_epoch_start() + >::current_epoch_start() } fn current_epoch() -> sp_consensus_babe::Epoch { - >::current_epoch() + >::current_epoch() } fn next_epoch() -> sp_consensus_babe::Epoch { - >::next_epoch() + >::next_epoch() } fn submit_report_equivocation_unsigned_extrinsic( @@ -1255,7 +1261,7 @@ mod tests { (BlockId::Hash(hash), block) }; - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Allocation of 1024k while having ~2048k should succeed. let ret = client.runtime_api().vec_with_capacity(&new_block_id, 1048576); diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml new file mode 100644 index 0000000000000..4d9d6125bd4e4 --- /dev/null +++ b/test-utils/test-runner/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "test-runner" +version = "0.9.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false + +[dependencies] +# client deps +sc-executor = { version = "0.9.0", path = "../../client/executor" } +sc-service = { version = "0.9.0", path = "../../client/service" } +sc-informant = { version = "0.9.0", path = "../../client/informant" } +sc-network = { version = "0.9.0", path = "../../client/network" } +sc-cli = { version = "0.9.0", path = "../../client/cli" } +sc-basic-authorship = { version = "0.9.0", path = "../../client/basic-authorship" } +sc-rpc = { version = "3.0.0", path = "../../client/rpc" } +sc-transaction-pool = { version = "3.0.0", path = "../../client/transaction-pool" } +sc-transaction-graph = { version = "3.0.0", path = "../../client/transaction-pool/graph" } +sc-client-api = { version = "3.0.0", path = "../../client/api" } +sc-rpc-server = { version = "3.0.0", path 
= "../../client/rpc-servers" } +manual-seal = { package = "sc-consensus-manual-seal", version = "0.9.0", path = "../../client/consensus/manual-seal" } + +# primitive deps +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-block-builder = { version = "3.0.0", path = "../../primitives/block-builder" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-session = { version = "3.0.0", path = "../../primitives/session" } +sp-offchain = { version = "3.0.0", path = "../../primitives/offchain" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } + +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "3.0.0", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "3.0.0", path = "../../primitives/runtime-interface" } + +# pallets +frame-system = { version = "3.0.0", path = "../../frame/system" } + +parity-scale-codec = "1.3.1" +env_logger = "0.7.1" +log = "0.4.8" +futures01 = { package = "futures", version = "0.1.29" } +futures = { package = "futures", version = "0.3", features = ["compat"] } +rand = "0.7" +tokio = { version = "0.2", features = ["full"] } + +# Calling RPC +jsonrpc-core = "15.1" +[dev-dependencies] +sc-finality-grandpa = { version = "0.9.0", path = "../../client/finality-grandpa" } +sc-consensus-babe = { version = 
"0.9.0", path = "../../client/consensus/babe" } +sp-consensus-babe = { version = "0.9.0", path = "../../primitives/consensus/babe" } +node-cli = { version = "2.0.0", path = "../../bin/node/cli" } diff --git a/test-utils/test-runner/src/host_functions.rs b/test-utils/test-runner/src/host_functions.rs new file mode 100644 index 0000000000000..ca8790683e6c4 --- /dev/null +++ b/test-utils/test-runner/src/host_functions.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[macro_export] +macro_rules! 
override_host_functions { + ($($fn_name:expr, $name:ident,)*) => {{ + let mut host_functions = vec![]; + $( + struct $name; + impl sp_wasm_interface::Function for $name { + fn name(&self) -> &str { + &$fn_name + } + + fn signature(&self) -> sp_wasm_interface::Signature { + sp_wasm_interface::Signature { + args: std::borrow::Cow::Owned(vec![ + sp_wasm_interface::ValueType::I32, + sp_wasm_interface::ValueType::I64, + sp_wasm_interface::ValueType::I32, + ]), + return_value: Some(sp_wasm_interface::ValueType::I32), + } + } + + fn execute( + &self, + context: &mut dyn sp_wasm_interface::FunctionContext, + _args: &mut dyn Iterator, + ) -> Result, String> { + ::into_ffi_value(true, context) + .map(sp_wasm_interface::IntoValue::into_value) + .map(Some) + } + } + host_functions.push(&$name as &'static dyn sp_wasm_interface::Function); + )* + host_functions + }}; +} + +/// Provides host functions that overrides runtime signature verification +/// to always return true. +pub struct SignatureVerificationOverride; + +impl sp_wasm_interface::HostFunctions for SignatureVerificationOverride { + fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> { + override_host_functions!( + "ext_crypto_ecdsa_verify_version_1", EcdsaVerify, + "ext_crypto_ed25519_verify_version_1", Ed25519Verify, + "ext_crypto_sr25519_verify_version_1", Sr25519Verify, + "ext_crypto_sr25519_verify_version_2", Sr25519VerifyV2, + ) + } +} diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs new file mode 100644 index 0000000000000..87ec4336d9523 --- /dev/null +++ b/test-utils/test-runner/src/lib.rs @@ -0,0 +1,312 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Test runner +//! # Substrate Test Runner +//! +//! Allows you to test +//!
+//! +//! - Migrations +//! - Runtime Upgrades +//! - Pallets and general runtime functionality. +//! +//! This works by running a full node with a Manual Seal-BABE™ hybrid consensus for block authoring. +//! +//!

Note

+//! The running node has no signature verification, which allows us author extrinsics for any account on chain. +//!
+//!
+//! +//!

How do I Use this?

+//! +//! +//! ```rust,ignore +//! use test_runner::{Node, ChainInfo, SignatureVerificationOverride, base_path, NodeConfig}; +//! use sc_finality_grandpa::GrandpaBlockImport; +//! use sc_service::{ +//! TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, BasePath, +//! DatabaseConfig, KeepBlocks, TransactionStorageMode, ChainSpec, Role, +//! config::{NetworkConfiguration, KeystoreConfig}, +//! }; +//! use std::sync::Arc; +//! use sp_inherents::InherentDataProviders; +//! use sc_consensus_babe::BabeBlockImport; +//! use sp_keystore::SyncCryptoStorePtr; +//! use sp_keyring::sr25519::Keyring::{Alice, Bob}; +//! use node_cli::chain_spec::development_config; +//! use sp_consensus_babe::AuthorityId; +//! use manual_seal::{ConsensusDataProvider, consensus::babe::BabeConsensusDataProvider}; +//! use sp_runtime::{traits::IdentifyAccount, MultiSigner, generic::Era}; +//! use sc_executor::WasmExecutionMethod; +//! use sc_network::{multiaddr, config::TransportConfig}; +//! use sc_client_api::execution_extensions::ExecutionStrategies; +//! use sc_informant::OutputFormat; +//! use sp_api::TransactionFor; +//! +//! type BlockImport = BabeBlockImport>; +//! +//! sc_executor::native_executor_instance!( +//! pub Executor, +//! node_runtime::api::dispatch, +//! node_runtime::native_version, +//! SignatureVerificationOverride, +//! ); +//! +//! struct Requirements; +//! +//! impl ChainInfo for Requirements { +//! /// Provide a Block type with an OpaqueExtrinsic +//! type Block = node_primitives::Block; +//! /// Provide an Executor type for the runtime +//! type Executor = Executor; +//! /// Provide the runtime itself +//! type Runtime = node_runtime::Runtime; +//! /// A touch of runtime api +//! type RuntimeApi = node_runtime::RuntimeApi; +//! /// A pinch of SelectChain implementation +//! type SelectChain = sc_consensus::LongestChain, Self::Block>; +//! /// A slice of concrete BlockImport type +//! type BlockImport = BlockImport< +//! Self::Block, +//! 
TFullBackend, +//! TFullClient, +//! Self::SelectChain, +//! >; +//! /// and a dash of SignedExtensions +//! type SignedExtras = node_runtime::SignedExtra; +//! +//! /// Create your signed extras here. +//! fn signed_extras( +//! from: ::AccountId, +//! ) -> Self::SignedExtension { +//! let nonce = frame_system::Pallet::::account_nonce(from); +//! +//! ( +//! frame_system::CheckSpecVersion::::new(), +//! frame_system::CheckTxVersion::::new(), +//! frame_system::CheckGenesis::::new(), +//! frame_system::CheckMortality::::from(Era::Immortal), +//! frame_system::CheckNonce::::from(nonce), +//! frame_system::CheckWeight::::new(), +//! pallet_transaction_payment::ChargeTransactionPayment::::from(0), +//! ) +//! } +//! +//! /// The function signature tells you all you need to know. ;) +//! fn create_client_parts(config: &Configuration) -> Result< +//! ( +//! Arc>, +//! Arc>, +//! KeyStorePtr, +//! TaskManager, +//! InherentDataProviders, +//! Option, +//! Self::Block +//! >, +//! > +//! >>, +//! Self::SelectChain, +//! Self::BlockImport +//! ), +//! sc_service::Error +//! > { +//! let ( +//! client, +//! backend, +//! keystore, +//! task_manager, +//! ) = new_full_parts::(config)?; +//! let client = Arc::new(client); +//! +//! let inherent_providers = InherentDataProviders::new(); +//! let select_chain = sc_consensus::LongestChain::new(backend.clone()); +//! +//! let (grandpa_block_import, ..) = +//! sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; +//! +//! let (block_import, babe_link) = sc_consensus_babe::block_import( +//! sc_consensus_babe::Config::get_or_compute(&*client)?, +//! grandpa_block_import, +//! client.clone(), +//! )?; +//! +//! let consensus_data_provider = BabeConsensusDataProvider::new( +//! client.clone(), +//! keystore.clone(), +//! &inherent_providers, +//! babe_link.epoch_changes().clone(), +//! vec![(AuthorityId::from(Alice.public()), 1000)] +//! ) +//! 
.expect("failed to create ConsensusDataProvider"); +//! +//! Ok(( +//! client, +//! backend, +//! keystore, +//! task_manager, +//! inherent_providers, +//! Some(Box::new(consensus_data_provider)), +//! select_chain, +//! block_import +//! )) +//! } +//! +//! fn dispatch_with_root(call: ::Call, node: &mut Node) { +//! let alice = MultiSigner::from(Alice.public()).into_account(); +//! // for chains that support sudo, otherwise, you'd have to use pallet-democracy here. +//! let call = pallet_sudo::Call::sudo(Box::new(call)); +//! node.submit_extrinsic(call, alice); +//! node.seal_blocks(1); +//! } +//! } +//! +//! /// And now for the most basic test +//! +//! #[test] +//! fn simple_balances_test() { +//! // given +//! let config = NodeConfig { +//! execution_strategies: ExecutionStrategies { +//! syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! other: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! }, +//! chain_spec: Box::new(development_config()), +//! log_targets: vec![], +//! }; +//! let mut node = Node::::new(config).unwrap(); +//! +//! type Balances = pallet_balances::Pallet; +//! +//! let (alice, bob) = (Alice.pair(), Bob.pair()); +//! let (alice_account_id, bob_acount_id) = ( +//! MultiSigner::from(alice.public()).into_account(), +//! MultiSigner::from(bob.public()).into_account() +//! ); +//! +//! /// the function with_state allows us to read state, pretty cool right? :D +//! let old_balance = node.with_state(|| Balances::free_balance(alice_account_id.clone())); +//! +//! // 70 dots +//! let amount = 70_000_000_000_000; +//! +//! /// Send extrinsic in action. +//! node.submit_extrinsic(BalancesCall::transfer(bob_acount_id.clone(), amount), alice_account_id.clone()); +//! +//! 
/// Produce blocks in action, Powered by manual-seal™. +//! node.seal_blocks(1); +//! +//! /// we can check the new state :D +//! let new_balance = node.with_state(|| Balances::free_balance(alice_account_id)); +//! +//! /// we can now make assertions on how state has changed. +//! assert_eq!(old_balance + amount, new_balance); +//! } +//! ``` + +use manual_seal::consensus::ConsensusDataProvider; +use sc_executor::NativeExecutionDispatch; +use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}; +use sp_api::{ConstructRuntimeApi, TransactionFor}; +use sp_consensus::{BlockImport, SelectChain}; +use sp_inherents::InherentDataProviders; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::{Block as BlockT, SignedExtension}; +use std::sync::Arc; + +mod node; +mod utils; +mod host_functions; + +pub use host_functions::*; +pub use node::*; + +/// Wrapper trait for concrete type required by this testing framework. +pub trait ChainInfo: Sized { + /// Opaque block type + type Block: BlockT; + + /// Executor type + type Executor: NativeExecutionDispatch + 'static; + + /// Runtime + type Runtime: frame_system::Config; + + /// RuntimeApi + type RuntimeApi: Send + + Sync + + 'static + + ConstructRuntimeApi>; + + /// select chain type. + type SelectChain: SelectChain + 'static; + + /// Block import type. + type BlockImport: Send + + Sync + + Clone + + BlockImport< + Self::Block, + Error = sp_consensus::Error, + Transaction = TransactionFor, Self::Block>, + > + 'static; + + /// The signed extras required by the runtime + type SignedExtras: SignedExtension; + + /// Signed extras, this function is caled in an externalities provided environment. + fn signed_extras(from: ::AccountId) -> Self::SignedExtras; + + /// Attempt to create client parts, including block import, + /// select chain strategy and consensus data provider. 
+ fn create_client_parts( + config: &Configuration, + ) -> Result< + ( + Arc>, + Arc>, + SyncCryptoStorePtr, + TaskManager, + InherentDataProviders, + Option< + Box< + dyn ConsensusDataProvider< + Self::Block, + Transaction = TransactionFor< + TFullClient, + Self::Block, + >, + >, + >, + >, + Self::SelectChain, + Self::BlockImport, + ), + sc_service::Error, + >; + + /// Given a call and a handle to the node, execute the call with root privileges. + fn dispatch_with_root(call: ::Call, node: &mut Node); +} diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs new file mode 100644 index 0000000000000..6965c6a804dbe --- /dev/null +++ b/test-utils/test-runner/src/node.rs @@ -0,0 +1,476 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::sync::Arc; + +use futures::{FutureExt, SinkExt, channel::{mpsc, oneshot}}; +use jsonrpc_core::MetaIoHandler; +use manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}; +use sc_cli::build_runtime; +use sc_client_api::{ + backend::{self, Backend}, CallExecutor, ExecutorProvider, + execution_extensions::ExecutionStrategies, +}; +use sc_service::{ + build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams, TFullBackend, + TFullCallExecutor, TFullClient, TaskManager, TaskType, ChainSpec, BasePath, + Configuration, DatabaseConfig, KeepBlocks, TransactionStorageMode, config::KeystoreConfig, +}; +use sc_transaction_pool::BasicPool; +use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata, OverlayedChanges, StorageTransactionCache}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::HeaderBackend; +use sp_core::ExecutionContext; +use sp_offchain::OffchainWorkerApi; +use sp_runtime::traits::{Block as BlockT, Extrinsic}; +use sp_runtime::{generic::BlockId, transaction_validity::TransactionSource, MultiSignature, MultiAddress}; +use sp_runtime::{generic::UncheckedExtrinsic, traits::NumberFor}; +use sp_session::SessionKeys; +use sp_state_machine::Ext; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use sp_transaction_pool::TransactionPool; + +pub use crate::utils::{logger, base_path}; +use crate::ChainInfo; +use log::LevelFilter; +use sp_keyring::sr25519::Keyring::Alice; +use sc_network::{multiaddr, config::{NetworkConfiguration, TransportConfig, Role}}; +use sc_informant::OutputFormat; +use sc_executor::WasmExecutionMethod; + +/// This holds a reference to a running node on another thread, +/// the node process is dropped when this struct is dropped +/// also holds logs from the process. +pub struct Node { + /// rpc handler for communicating with the node over rpc. 
+ rpc_handler: Arc>, + /// Stream of log lines + log_stream: mpsc::UnboundedReceiver, + /// node tokio runtime + _runtime: tokio::runtime::Runtime, + /// handle to the running node. + _task_manager: Option, + /// client instance + client: Arc>, + /// transaction pool + pool: Arc< + dyn TransactionPool< + Block = T::Block, + Hash = ::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_graph::base_pool::Transaction< + ::Hash, + ::Extrinsic, + >, + >, + >, + /// channel to communicate with manual seal on. + manual_seal_command_sink: mpsc::Sender::Hash>>, + /// backend type. + backend: Arc>, + /// Block number at initialization of this Node. + initial_block_number: NumberFor +} + +/// Configuration options for the node. +pub struct NodeConfig { + /// A set of log targets you'd like to enable/disbale + pub log_targets: Vec<(&'static str, LevelFilter)>, + + /// ChainSpec for the runtime + pub chain_spec: Box, + + /// wasm execution strategies. + pub execution_strategies: ExecutionStrategies, +} + +type EventRecord = frame_system::EventRecord<::Event, ::Hash>; + +impl Node { + /// Starts a node with the manual-seal authorship. + pub fn new(node_config: NodeConfig) -> Result + where + >>::RuntimeApi: + Core + + Metadata + + OffchainWorkerApi + + SessionKeys + + TaggedTransactionQueue + + BlockBuilder + + ApiExt as Backend>::State>, + { + let NodeConfig { log_targets, mut chain_spec, execution_strategies } = node_config; + let tokio_runtime = build_runtime().unwrap(); + + // unbounded logs, should be fine, test is shortlived. 
+ let (log_sink, log_stream) = mpsc::unbounded(); + + logger(log_targets, tokio_runtime.handle().clone(), log_sink); + let runtime_handle = tokio_runtime.handle().clone(); + + let task_executor = move |fut, task_type| match task_type { + TaskType::Async => runtime_handle.spawn(fut).map(drop), + TaskType::Blocking => runtime_handle + .spawn_blocking(move || futures::executor::block_on(fut)) + .map(drop), + }; + + let base_path = if let Some(base) = base_path() { + BasePath::new(base) + } else { + BasePath::new_temp_dir().expect("couldn't create a temp dir") + }; + let root_path = base_path.path().to_path_buf().join("chains").join(chain_spec.id()); + + let key_seed = Alice.to_seed(); + let storage = chain_spec + .as_storage_builder() + .build_storage() + .expect("could not build storage"); + + chain_spec.set_storage(storage); + + let mut network_config = NetworkConfiguration::new( + format!("Test Node for: {}", key_seed), + "network/test/0.1", + Default::default(), + None, + ); + let informant_output_format = OutputFormat { enable_color: false }; + + network_config.allow_non_globals_in_dht = true; + + network_config + .listen_addresses + .push(multiaddr::Protocol::Memory(rand::random()).into()); + + network_config.transport = TransportConfig::MemoryOnly; + + let config = Configuration { + impl_name: "test-node".to_string(), + impl_version: "0.1".to_string(), + role: Role::Authority, + task_executor: task_executor.into(), + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::Path { + path: root_path.join("key"), + password: None, + }, + database: DatabaseConfig::RocksDb { + path: root_path.join("db"), + cache_size: 128, + }, + state_cache_size: 16777216, + state_cache_child_ratio: None, + chain_spec, + wasm_method: WasmExecutionMethod::Interpreted, + execution_strategies, + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + prometheus_config: 
None, + telemetry_endpoints: None, + telemetry_external_transport: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + dev_key_seed: Some(key_seed), + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(base_path), + wasm_runtime_overrides: None, + informant_output_format, + disable_log_reloading: false, + keystore_remote: None, + keep_blocks: KeepBlocks::All, + state_pruning: Default::default(), + transaction_storage: TransactionStorageMode::BlockBody, + }; + + let ( + client, + backend, + keystore, + mut task_manager, + inherent_data_providers, + consensus_data_provider, + select_chain, + block_import, + ) = T::create_client_parts(&config)?; + + let import_queue = + manual_seal::import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None); + + let transaction_pool = BasicPool::new_full( + config.transaction_pool.clone(), + true.into(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = { + let params = BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + }; + build_network(params)? + }; + + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + + // Proposer object for block authorship. + let env = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + config.prometheus_registry(), + None + ); + + // Channel for the rpc handler to communicate with the authorship task. 
+ let (command_sink, commands_stream) = mpsc::channel(10); + + let rpc_handlers = { + let params = SpawnTasksParams { + config, + client: client.clone(), + backend: backend.clone(), + task_manager: &mut task_manager, + keystore, + on_demand: None, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder: Box::new(move |_, _| jsonrpc_core::IoHandler::default()), + remote_blockchain: None, + network, + network_status_sinks, + system_rpc_tx, + telemetry: None + }; + spawn_tasks(params)? + }; + + // Background authorship future. + let authorship_future = run_manual_seal(ManualSealParams { + block_import, + env, + client: client.clone(), + pool: transaction_pool.pool().clone(), + commands_stream, + select_chain, + consensus_data_provider, + inherent_data_providers, + }); + + // spawn the authorship task as an essential task. + task_manager + .spawn_essential_handle() + .spawn("manual-seal", authorship_future); + + network_starter.start_network(); + let rpc_handler = rpc_handlers.io_handler(); + let initial_number = client.info().best_number; + + Ok(Self { + rpc_handler, + _task_manager: Some(task_manager), + _runtime: tokio_runtime, + client, + pool: transaction_pool, + backend, + log_stream, + manual_seal_command_sink: command_sink, + initial_block_number: initial_number, + }) + } + + /// Returns a reference to the rpc handlers. + pub fn rpc_handler(&self) -> Arc> { + self.rpc_handler.clone() + } + + /// Return a reference to the Client + pub fn client(&self) -> Arc> { + self.client.clone() + } + + /// Executes closure in an externalities provided environment. 
+ pub fn with_state(&self, closure: impl FnOnce() -> R) -> R + where + as CallExecutor>::Error: std::fmt::Debug, + { + let id = BlockId::Hash(self.client.info().best_hash); + let mut overlay = OverlayedChanges::default(); + let changes_trie = backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()).unwrap(); + let mut cache = + StorageTransactionCache:: as Backend>::State>::default(); + let mut extensions = self + .client + .execution_extensions() + .extensions(&id, ExecutionContext::BlockConstruction); + let state_backend = self + .backend + .state_at(id.clone()) + .expect(&format!("State at block {} not found", id)); + + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &state_backend, + changes_trie.clone(), + Some(&mut extensions), + ); + sp_externalities::set_and_run_with_externalities(&mut ext, closure) + } + + /// submit some extrinsic to the node, providing the sending account. + pub fn submit_extrinsic( + &mut self, + call: impl Into<::Call>, + from: ::AccountId, + ) -> ::Hash + where + ::Extrinsic: From< + UncheckedExtrinsic< + MultiAddress< + ::AccountId, + ::Index, + >, + ::Call, + MultiSignature, + T::SignedExtras, + >, + >, + { + let extra = self.with_state(|| T::signed_extras(from.clone())); + let signed_data = Some((from.into(), MultiSignature::Sr25519(Default::default()), extra)); + let ext = UncheckedExtrinsic::< + MultiAddress< + ::AccountId, + ::Index, + >, + ::Call, + MultiSignature, + T::SignedExtras, + >::new(call.into(), signed_data) + .expect("UncheckedExtrinsic::new() always returns Some"); + let at = self.client.info().best_hash; + + self._runtime + .block_on( + self.pool.submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()), + ) + .unwrap() + } + + /// Get the events of the most recently produced block + pub fn events(&self) -> Vec> { + self.with_state(|| frame_system::Pallet::::events()) + } + + /// Checks the node logs for a specific entry. 
+ pub fn assert_log_line(&mut self, content: &str) { + futures::executor::block_on(async { + use futures::StreamExt; + + while let Some(log_line) = self.log_stream.next().await { + if log_line.contains(content) { + return; + } + } + + panic!("Could not find {} in logs content", content); + }); + } + + /// Instructs manual seal to seal new, possibly empty blocks. + pub fn seal_blocks(&mut self, num: usize) { + let (tokio, sink) = (&mut self._runtime, &mut self.manual_seal_command_sink); + + for count in 0..num { + let (sender, future_block) = oneshot::channel(); + let future = sink.send(EngineCommand::SealNewBlock { + create_empty: true, + finalize: false, + parent_hash: None, + sender: Some(sender), + }); + + tokio.block_on(async { + const ERROR: &'static str = "manual-seal authorship task is shutting down"; + future.await.expect(ERROR); + + match future_block.await.expect(ERROR) { + Ok(block) => log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num), + Err(err) => log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err), + } + }); + } + } + + /// Revert count number of blocks from the chain. + pub fn revert_blocks(&self, count: NumberFor) { + self.backend.revert(count, true).expect("Failed to revert blocks: "); + } + + /// Revert all blocks added since creation of the node. + pub fn clean(&self) { + // if a db path was specified, revert all blocks we've added + if let Some(_) = base_path() { + let diff = self.client.info().best_number - self.initial_block_number; + self.revert_blocks(diff); + } + } + + /// Performs a runtime upgrade given a wasm blob. 
+ pub fn upgrade_runtime(&mut self, wasm: Vec) + where + ::Call: From> + { + let call = frame_system::Call::set_code(wasm); + T::dispatch_with_root(call.into(), self); + } +} + +impl Drop for Node { + fn drop(&mut self) { + self.clean(); + + if let Some(mut task_manager) = self._task_manager.take() { + // if this isn't called the node will live forever + task_manager.terminate() + } + } +} diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs new file mode 100644 index 0000000000000..7cd512e2d4869 --- /dev/null +++ b/test-utils/test-runner/src/utils.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::{Sink, SinkExt}; +use std::fmt; +use std::io::Write; +use log::LevelFilter; + +/// Base db path gotten from env +pub fn base_path() -> Option { + std::env::var("DB_BASE_PATH").ok() +} + +/// Builds the global logger. 
+pub fn logger( + log_targets: Vec<(&'static str, LevelFilter)>, + executor: tokio::runtime::Handle, + log_sink: S, +) +where + S: Sink + Clone + Unpin + Send + Sync + 'static, + S::Error: Send + Sync + fmt::Debug, +{ + let mut builder = env_logger::builder(); + builder.format(move |buf: &mut env_logger::fmt::Formatter, record: &log::Record| { + let entry = format!("{} {} {}", record.level(), record.target(), record.args()); + let res = writeln!(buf, "{}", entry); + + let mut log_sink_clone = log_sink.clone(); + let _ = executor.spawn(async move { + log_sink_clone.send(entry).await.expect("log_stream is dropped"); + }); + res + }); + builder.write_style(env_logger::WriteStyle::Always); + + for (module, level) in log_targets { + builder.filter_module(module, level); + } + let _ = builder.is_test(true).try_init(); +} diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 764d2d18a61b7..3a11df62dc254 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.27", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.28", features = ["websocket"] } console_error_panic_hook = "0.1.6" js-sys = "0.3.34" wasm-bindgen = "0.2.57" diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 19f4596e92fd6..6784b1ecabf41 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -21,11 +21,17 @@ mod writer; use sc_cli::{ExecutionStrategy, WasmExecutionMethod}; use std::fmt::Debug; +// Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be used +// like crate names with `_` +fn parse_pallet_name(pallet: &str) -> String { + pallet.replace("-", "_") +} + /// The `benchmark` command used to benchmark FRAME Pallets. 
#[derive(Debug, structopt::StructOpt)] pub struct BenchmarkCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[structopt(short, long)] + #[structopt(short, long, parse(from_str = parse_pallet_name))] pub pallet: String, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index de90933e17978..b8bee6380006a 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -25,9 +25,10 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } sp-io = { version = "3.0.0", path = "../../../primitives/io" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } +tokio = { version = "0.2", features = ["macros"] } [features] remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 8211274c46298..8cca728c1ffaa 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -18,7 +18,7 @@ //! # Remote Externalities //! //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate -//! based chain, or a local cache file. +//! based chain, or a local state snapshot file. //! //! #### Runtime to Test Against //! @@ -76,7 +76,7 @@ //! assert_eq!( //! // note: the hash corresponds to 3098546. We can check only the parent. //! // https://polkascan.io/kusama/block/3098546 -//! >::block_hash(3098545u32), +//! >::block_hash(3098545u32), //! parent, //! ) //! 
}); @@ -106,7 +106,7 @@ use std::{ path::{Path, PathBuf}, }; use log::*; -use sp_core::{hashing::twox_128}; +use sp_core::hashing::twox_128; pub use sp_io::TestExternalities; use sp_core::{ hexdisplay::HexDisplay, @@ -115,62 +115,62 @@ use sp_core::{ use codec::{Encode, Decode}; use jsonrpsee_http_client::{HttpClient, HttpConfig}; +use sp_runtime::traits::Block as BlockT; + type KeyPair = (StorageKey, StorageData); -type Hash = sp_core::H256; -// TODO: make these two generic. const LOG_TARGET: &str = "remote-ext"; const TARGET: &str = "http://localhost:9933"; jsonrpsee_proc_macros::rpc_client_api! { - RpcApi { + RpcApi { #[rpc(method = "state_getPairs", positional_params)] - fn storage_pairs(prefix: StorageKey, hash: Option) -> Vec<(StorageKey, StorageData)>; + fn storage_pairs(prefix: StorageKey, hash: Option) -> Vec<(StorageKey, StorageData)>; #[rpc(method = "chain_getFinalizedHead")] - fn finalized_head() -> Hash; + fn finalized_head() -> B::Hash; } } /// The execution mode. #[derive(Clone)] -pub enum Mode { +pub enum Mode { /// Online. - Online(OnlineConfig), - /// Offline. Uses a cached file and needs not any client config. + Online(OnlineConfig), + /// Offline. Uses a state snapshot file and needs not any client config. Offline(OfflineConfig), } /// configuration of the online execution. /// -/// A cache config must be present. +/// A state snapshot config must be present. #[derive(Clone)] pub struct OfflineConfig { - /// The configuration of the cache file to use. It must be present. - pub cache: CacheConfig, + /// The configuration of the state snapshot file to use. It must be present. + pub state_snapshot: SnapshotConfig, } /// Configuration of the online execution. /// -/// A cache config may be present and will be written to in that case. +/// A state snapshot config may be present and will be written to in that case. #[derive(Clone)] -pub struct OnlineConfig { +pub struct OnlineConfig { /// The HTTP uri to use. 
pub uri: String, /// The block number at which to connect. Will be latest finalized head if not provided. - pub at: Option, - /// An optional cache file to WRITE to, not for reading. Not cached if set to `None`. - pub cache: Option, + pub at: Option, + /// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`. + pub state_snapshot: Option, /// The modules to scrape. If empty, entire chain state will be scraped. pub modules: Vec, } -impl Default for OnlineConfig { +impl Default for OnlineConfig { fn default() -> Self { - Self { uri: TARGET.to_owned(), at: None, cache: None, modules: Default::default() } + Self { uri: TARGET.to_owned(), at: None, state_snapshot: None, modules: Default::default() } } } -impl OnlineConfig { +impl OnlineConfig { /// Return a new http rpc client. fn rpc(&self) -> HttpClient { HttpClient::new(&self.uri, HttpConfig { max_request_body_size: u32::MAX }) @@ -178,54 +178,50 @@ impl OnlineConfig { } } -/// Configuration of the cache. +/// Configuration of the state snapshot. #[derive(Clone)] -pub struct CacheConfig { - // TODO: I could mix these two into one filed, but I think separate is better bc one can be - // configurable while one not. - /// File name. - pub name: String, - /// Base directory. - pub directory: String, +pub struct SnapshotConfig { + /// The path to the snapshot file. + pub path: PathBuf, } -impl Default for CacheConfig { - fn default() -> Self { - Self { name: "CACHE".into(), directory: ".".into() } +impl SnapshotConfig { + pub fn new>(path: P) -> Self { + Self { path: path.into() } } } -impl CacheConfig { - fn path(&self) -> PathBuf { - Path::new(&self.directory).join(self.name.clone()) +impl Default for SnapshotConfig { + fn default() -> Self { + Self { path: Path::new("SNAPSHOT").into() } } } /// Builder for remote-externalities. 
-pub struct Builder { +pub struct Builder { inject: Vec, - mode: Mode, + mode: Mode, } -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Self { inject: Default::default(), - mode: Mode::Online(OnlineConfig::default()) + mode: Mode::Online(OnlineConfig::default()), } } } // Mode methods -impl Builder { - fn as_online(&self) -> &OnlineConfig { +impl Builder { + fn as_online(&self) -> &OnlineConfig { match &self.mode { Mode::Online(config) => &config, _ => panic!("Unexpected mode: Online"), } } - fn as_online_mut(&mut self) -> &mut OnlineConfig { + fn as_online_mut(&mut self) -> &mut OnlineConfig { match &mut self.mode { Mode::Online(config) => config, _ => panic!("Unexpected mode: Online"), @@ -234,13 +230,13 @@ impl Builder { } // RPC methods -impl Builder { - async fn rpc_get_head(&self) -> Result { +impl Builder { + async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); - RpcApi::finalized_head(&self.as_online().rpc()).await.map_err(|e| { + RpcApi::::finalized_head(&self.as_online().rpc()).await.map_err(|e| { error!("Error = {:?}", e); "rpc finalized_head failed." - }) + }) } /// Relay the request to `state_getPairs` rpc endpoint. @@ -249,28 +245,28 @@ impl Builder { async fn rpc_get_pairs( &self, prefix: StorageKey, - at: Hash, + at: B::Hash, ) -> Result, &'static str> { trace!(target: LOG_TARGET, "rpc: storage_pairs: {:?} / {:?}", prefix, at); - RpcApi::storage_pairs(&self.as_online().rpc(), prefix, Some(at)).await.map_err(|e| { + RpcApi::::storage_pairs(&self.as_online().rpc(), prefix, Some(at)).await.map_err(|e| { error!("Error = {:?}", e); "rpc storage_pairs failed" - }) + }) } } // Internal methods -impl Builder { - /// Save the given data as cache. - fn save_cache(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { - info!(target: LOG_TARGET, "writing to cache file {:?}", path); +impl Builder { + /// Save the given data as state snapshot. 
+ fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { + info!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; Ok(()) } - /// initialize `Self` from cache. Panics if the file does not exist. - fn load_cache(&self, path: &Path) -> Result, &'static str> { - info!(target: LOG_TARGET, "scraping keypairs from cache {:?}", path,); + /// initialize `Self` from state snapshot. Panics if the file does not exist. + fn load_state_snapshot(&self, path: &Path) -> Result, &'static str> { + info!(target: LOG_TARGET, "scraping keypairs from state snapshot {:?}", path,); let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; Decode::decode(&mut &*bytes).map_err(|_| "decode failed") } @@ -319,12 +315,12 @@ impl Builder { async fn pre_build(mut self) -> Result, &'static str> { let mut base_kv = match self.mode.clone() { - Mode::Offline(config) => self.load_cache(&config.cache.path())?, + Mode::Offline(config) => self.load_state_snapshot(&config.state_snapshot.path)?, Mode::Online(config) => { self.init_remote_client().await?; let kp = self.load_remote().await?; - if let Some(c) = config.cache { - self.save_cache(&kp, &c.path())?; + if let Some(c) = config.state_snapshot { + self.save_state_snapshot(&kp, &c.path)?; } kp } @@ -341,7 +337,7 @@ impl Builder { } // Public methods -impl Builder { +impl Builder { /// Create a new builder. pub fn new() -> Self { Default::default() @@ -355,8 +351,8 @@ impl Builder { self } - /// Configure a cache to be used. - pub fn mode(mut self, mode: Mode) -> Self { + /// Configure a state snapshot to be used. 
+ pub fn mode(mut self, mode: Mode) -> Self { self.mode = mode; self } @@ -375,62 +371,75 @@ impl Builder { } } -#[cfg(feature = "remote-test")] #[cfg(test)] -mod tests { - use super::*; +mod test_prelude { + pub(crate) use super::*; + pub(crate) use sp_runtime::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; - fn init_logger() { + pub(crate) type Block = RawBlock>; + + pub(crate) fn init_logger() { let _ = env_logger::Builder::from_default_env() .format_module_path(false) .format_level(true) .try_init(); } +} - #[async_std::test] - async fn can_build_one_pallet() { +#[cfg(test)] +mod tests { + use super::test_prelude::*; + + #[tokio::test] + async fn can_load_state_snapshot() { init_logger(); - Builder::new() - .mode(Mode::Online(OnlineConfig { - modules: vec!["Proxy".into()], - ..Default::default() + Builder::::new() + .mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig { path: "test_data/proxy_test".into() }, })) .build() .await - .unwrap() + .expect("Can't read state snapshot file") .execute_with(|| {}); } +} + +#[cfg(all(test, feature = "remote-test"))] +mod remote_tests { + use super::test_prelude::*; - #[async_std::test] - async fn can_load_cache() { + #[tokio::test] + async fn can_build_one_pallet() { init_logger(); - Builder::new() - .mode(Mode::Offline(OfflineConfig { - cache: CacheConfig { name: "proxy_test".into(), ..Default::default() }, + Builder::::new() + .mode(Mode::Online(OnlineConfig { + modules: vec!["Proxy".into()], + ..Default::default() })) .build() .await - .unwrap() + .expect("Can't reach the remote node. 
Is it running?") .execute_with(|| {}); } - #[async_std::test] - async fn can_create_cache() { + #[tokio::test] + async fn can_create_state_snapshot() { init_logger(); - Builder::new() + Builder::::new() .mode(Mode::Online(OnlineConfig { - cache: Some(CacheConfig { - name: "test_cache_to_remove.bin".into(), + state_snapshot: Some(SnapshotConfig { + name: "test_snapshot_to_remove.bin".into(), ..Default::default() }), ..Default::default() })) .build() .await + .expect("Can't reach the remote node. Is it running?") .unwrap() .execute_with(|| {}); - let to_delete = std::fs::read_dir(CacheConfig::default().directory) + let to_delete = std::fs::read_dir(SnapshotConfig::default().directory) .unwrap() .into_iter() .map(|d| d.unwrap()) @@ -444,9 +453,13 @@ mod tests { } } - #[async_std::test] + #[tokio::test] async fn can_build_all() { init_logger(); - Builder::new().build().await.unwrap().execute_with(|| {}); + Builder::::new() + .build() + .await + .expect("Can't reach the remote node. Is it running?") + .execute_with(|| {}); } } diff --git a/utils/frame/remote-externalities/test_data/proxy_test b/utils/frame/remote-externalities/test_data/proxy_test new file mode 100644 index 0000000000000..548ce9cdba4f1 Binary files /dev/null and b/utils/frame/remote-externalities/test_data/proxy_test differ diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 4ab38692a5cfa..8e407f3b2d739 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -18,7 +18,7 @@ //! `Structopt`-ready struct for `try-runtime`. 
use parity_scale_codec::Decode; -use std::{fmt::Debug, str::FromStr}; +use std::{fmt::Debug, path::PathBuf, str::FromStr}; use sc_service::Configuration; use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; use sc_executor::NativeExecutor; @@ -37,10 +37,6 @@ pub struct TryRuntimeCmd { #[structopt(flatten)] pub shared_params: sc_cli::SharedParams, - /// The state to use to run the migration. Should be a valid FILE or HTTP URI. - #[structopt(short, long, default_value = "http://localhost:9933")] - pub state: State, - /// The execution strategy that should be used for benchmarks #[structopt( long = "execution", @@ -60,32 +56,64 @@ pub struct TryRuntimeCmd { default_value = "Interpreted" )] pub wasm_method: WasmExecutionMethod, + + /// The state to use to run the migration. + #[structopt(subcommand)] + pub state: State, } /// The state to use for a migration dry-run. -#[derive(Debug)] +#[derive(Debug, structopt::StructOpt)] pub enum State { - /// A snapshot. Inner value is a file path. - Snap(String), + /// Use a state snapshot as state to run the migration. + Snap { + snapshot_path: PathBuf, + }, + + /// Use a live chain to run the migration. + Live { + /// An optional state snapshot file to WRITE to. Not written if set to `None`. + #[structopt(short, long)] + snapshot_path: Option, + + /// The block hash at which to connect. + /// Will be latest finalized head if not provided. + #[structopt(short, long, multiple = false, parse(try_from_str = parse_hash))] + block_at: Option, + + /// The modules to scrape. If empty, entire chain state will be scraped. + #[structopt(short, long, require_delimiter = true)] + modules: Option>, + + /// The url to connect to. + #[structopt(default_value = "http://localhost:9933", parse(try_from_str = parse_url))] + url: String, + }, +} - /// A live chain. Inner value is the HTTP uri. 
- Live(String), +fn parse_hash(block_number: &str) -> Result { + let block_number = if block_number.starts_with("0x") { + &block_number[2..] + } else { + block_number + }; + + if let Some(pos) = block_number.chars().position(|c| !c.is_ascii_hexdigit()) { + Err(format!( + "Expected block hash, found illegal hex character at position: {}", + 2 + pos, + )) + } else { + Ok(block_number.into()) + } } -impl FromStr for State { - type Err = &'static str; - fn from_str(s: &str) -> Result { - match s.get(..7) { - // could use Url crate as well, but lets keep it simple for now. - Some("http://") => Ok(State::Live(s.to_string())), - Some("file://") => s - .split("//") - .collect::>() - .get(1) - .map(|s| State::Snap(s.to_string())) - .ok_or("invalid file URI"), - _ => Err("invalid format. Must be a valid HTTP or File URI"), - } +fn parse_url(s: &str) -> Result { + if s.starts_with("http://") { + // could use Url crate as well, but lets keep it simple for now. + Ok(s.to_string()) + } else { + Err("not a valid HTTP url: must start with 'http://'") } } @@ -93,6 +121,10 @@ impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where B: BlockT, + B::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, { let spec = config.chain_spec; @@ -121,13 +153,26 @@ impl TryRuntimeCmd { ); let ext = { - use remote_externalities::{Builder, Mode, CacheConfig, OfflineConfig, OnlineConfig}; + use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig}; let builder = match &self.state { - State::Snap(file_path) => Builder::new().mode(Mode::Offline(OfflineConfig { - cache: CacheConfig { name: file_path.into(), ..Default::default() }, - })), - State::Live(http_uri) => Builder::new().mode(Mode::Online(OnlineConfig { - uri: http_uri.into(), + State::Snap { snapshot_path } => { + Builder::::new().mode(Mode::Offline(OfflineConfig { + state_snapshot: 
SnapshotConfig::new(snapshot_path), + })) + }, + State::Live { + url, + snapshot_path, + block_at, + modules + } => Builder::::new().mode(Mode::Online(OnlineConfig { + uri: url.into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.clone().unwrap_or_default(), + at: match block_at { + Some(b) => Some(b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))?), + None => None, + }, ..Default::default() })), };

(); - if type_id == sp_std::any::TypeId::of::>() { + if type_id == sp_std::any::TypeId::of::>() { return Some("System") } if type_id == sp_std::any::TypeId::of::() { @@ -492,8 +492,8 @@ mod tests { } impl_runtime_metadata!( - for TestRuntime with modules where Extrinsic = TestExtrinsic - system::Module as System { index 0 } with Event, + for TestRuntime with pallets where Extrinsic = TestExtrinsic + system::Pallet as System { index 0 } with Event, event_module::Module as Module { index 1 } with Event Call, event_module2::Module as Module2 { index 2 } with Event Storage Call, ); diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index fc2a21ff72517..86eafe86f43f4 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -80,7 +80,8 @@ mod tests { let translate_fn = |old: Option| -> Option<(u64, u64)> { old.map(|o| (o.into(), (o*2).into())) }; - let _ = Value::translate(translate_fn); + let res = Value::translate(translate_fn); + debug_assert!(res.is_ok()); // new storage should be `(1111, 1111 * 2)` assert_eq!(Value::get(), (1111, 2222)); diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 286c545d30d90..ba4869d4b8718 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -15,2331 +15,64 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for FRAME. +//! Traits and associated utilities for use in the FRAME environment. //! //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. 
-use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; -use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; -use sp_core::u32_trait::Value as U32; -use sp_runtime::{ - traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, Block as BlockT, BadOrigin, Convert, - MaybeSerializeDeserialize, SaturatedConversion, Saturating, StoredMapError, - UniqueSaturatedFrom, UniqueSaturatedInto, Zero, - }, - BoundToRuntimeAppPublic, ConsensusEngineId, DispatchError, DispatchResult, Percent, - RuntimeAppPublic, RuntimeDebug, +pub mod tokens; +pub use tokens::fungible; +pub use tokens::fungibles; +pub use tokens::currency::{ + Currency, LockIdentifier, LockableCurrency, ReservableCurrency, VestingSchedule, }; -use sp_staking::SessionIndex; -use crate::dispatch::Parameter; -use crate::storage::StorageMap; -use crate::weights::Weight; -use bitflags::bitflags; -use impl_trait_for_tuples::impl_for_tuples; +pub use tokens::imbalance::{Imbalance, OnUnbalanced, SignedImbalance}; +pub use tokens::{ExistenceRequirement, WithdrawReasons, BalanceStatus}; -/// Re-expected for the macro. -#[doc(hidden)] -pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; +mod members; +pub use members::{Contains, ContainsLengthBound, InitializeMembers, ChangeMembers}; -/// A trait for online node inspection in a session. -/// -/// Something that can give information about the current validator set. -pub trait ValidatorSet { - /// Type for representing validator id in a session. - type ValidatorId: Parameter; - /// A type for converting `AccountId` to `ValidatorId`. - type ValidatorIdOf: Convert>; - - /// Returns current session index. - fn session_index() -> SessionIndex; - - /// Returns the active set of validators. - fn validators() -> Vec; -} - -/// [`ValidatorSet`] combined with an identification. -pub trait ValidatorSetWithIdentification: ValidatorSet { - /// Full identification of `ValidatorId`. 
- type Identification: Parameter; - /// A type for converting `ValidatorId` to `Identification`. - type IdentificationOf: Convert>; -} - -/// A session handler for specific key type. -pub trait OneSessionHandler: BoundToRuntimeAppPublic { - /// The key type expected. - type Key: Decode + Default + RuntimeAppPublic; - - /// The given validator set will be used for the genesis session. - /// It is guaranteed that the given validator set will also be used - /// for the second session, therefore the first call to `on_new_session` - /// should provide the same validator set. - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator, ValidatorId: 'a; - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true when at least one of the session keys - /// or the underlying economic identities/distribution behind one the - /// session keys has changed, false otherwise. - /// - /// The `validators` are the validators of the incoming session, and `queued_validators` - /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator, ValidatorId: 'a; - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(_validator_index: usize); -} - -/// Simple trait for providing a filter over a reference to some type. -pub trait Filter { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(_: &T) -> bool; -} - -impl Filter for () { - fn filter(_: &T) -> bool { true } -} - -/// Trait to add a constraint onto the filter. -pub trait FilterStack: Filter { - /// The type used to archive the stack. 
- type Stack; - - /// Add a new `constraint` onto the filter. - fn push(constraint: impl Fn(&T) -> bool + 'static); - - /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. - fn pop(); - - /// Clear the filter, returning a value that may be used later to `restore` it. - fn take() -> Self::Stack; - - /// Restore the filter from a previous `take` operation. - fn restore(taken: Self::Stack); -} - -/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. -pub struct FilterStackGuard, T>(PhantomData<(F, T)>); - -/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when -/// dropped. -pub struct ClearFilterGuard, T>(Option, PhantomData); - -impl, T> FilterStackGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { - F::push(constraint); - Self(PhantomData) - } -} - -impl, T> Drop for FilterStackGuard { - fn drop(&mut self) { - F::pop(); - } -} - -impl, T> ClearFilterGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new() -> Self { - Self(Some(F::take()), PhantomData) - } -} - -impl, T> Drop for ClearFilterGuard { - fn drop(&mut self) { - if let Some(taken) = self.0.take() { - F::restore(taken); - } - } -} - -/// Simple trait for providing a filter over a reference to some type, given an instance of itself. -pub trait InstanceFilter: Sized + Send + Sync { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(&self, _: &T) -> bool; - - /// Determines whether `self` matches at least everything that `_o` does. 
- fn is_superset(&self, _o: &Self) -> bool { false } -} - -impl InstanceFilter for () { - fn filter(&self, _: &T) -> bool { true } - fn is_superset(&self, _o: &Self) -> bool { true } -} - -#[macro_export] -macro_rules! impl_filter_stack { - ($target:ty, $base:ty, $call:ty, $module:ident) => { - #[cfg(feature = "std")] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - thread_local! { - static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); - } - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && - FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); - } - fn pop() { - FILTER.with(|filter| filter.borrow_mut().pop()); - } - fn take() -> Self::Stack { - FILTER.with(|filter| take(filter.borrow_mut().as_mut())) - } - fn restore(mut s: Self::Stack) { - FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); - } - } - } - - #[cfg(not(feature = "std"))] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - struct ThisFilter(RefCell bool + 'static>>>); - // NOTE: Safe only in wasm (guarded above) because there's only one thread. 
- unsafe impl Send for ThisFilter {} - unsafe impl Sync for ThisFilter {} - - static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.0.borrow_mut().push(Box::new(f)); - } - fn pop() { - FILTER.0.borrow_mut().pop(); - } - fn take() -> Self::Stack { - take(FILTER.0.borrow_mut().as_mut()) - } - fn restore(mut s: Self::Stack) { - swap(FILTER.0.borrow_mut().as_mut(), &mut s); - } - } - } - } -} - -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - -#[cfg(test)] -mod test_impl_filter_stack { - use super::*; - - pub struct IsCallable; - pub struct BaseFilter; - impl Filter for BaseFilter { - fn filter(x: &u32) -> bool { x % 2 == 0 } - } - impl_filter_stack!( - crate::traits::test_impl_filter_stack::IsCallable, - crate::traits::test_impl_filter_stack::BaseFilter, - u32, - is_callable - ); - - #[test] - fn impl_filter_stack_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::push(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::push(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - let saved = IsCallable::take(); - assert!(IsCallable::filter(&36)); - 
assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::restore(saved); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - - #[test] - fn guards_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - { - let _guard_1 = FilterStackGuard::::new(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = FilterStackGuard::::new(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = ClearFilterGuard::::new(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } -} - -/// An abstraction of a value stored within storage, but possibly as part of a larger composite -/// item. -pub trait StoredMap { - /// Get the item, or its default if it doesn't yet exist; we make no distinction between the - /// two. - fn get(k: &K) -> T; - - /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is - /// returned. 
It is removed or reset to default value if it has been mutated to `None` - fn try_mutate_exists>( - k: &K, - f: impl FnOnce(&mut Option) -> Result, - ) -> Result; - - // Everything past here has a default implementation. - - /// Mutate the item. - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { - Self::mutate_exists(k, |maybe_account| match maybe_account { - Some(ref mut account) => f(account), - x @ None => { - let mut account = Default::default(); - let r = f(&mut account); - *x = Some(account); - r - } - }) - } - - /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. - /// - /// This is infallible as long as the value does not get destroyed. - fn mutate_exists( - k: &K, - f: impl FnOnce(&mut Option) -> R, - ) -> Result { - Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) - } - - /// Set the item to something new. - fn insert(k: &K, t: T) -> Result<(), StoredMapError> { Self::mutate(k, |i| *i = t) } - - /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K) -> Result<(), StoredMapError> { Self::mutate_exists(k, |x| *x = None) } -} - -/// A simple, generic one-parameter event notifier/handler. -pub trait HandleLifetime { - /// An account was created. - fn created(_t: &T) -> Result<(), StoredMapError> { Ok(()) } - - /// An account was killed. - fn killed(_t: &T) -> Result<(), StoredMapError> { Ok(()) } -} - -impl HandleLifetime for () {} - -/// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this -/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this -/// would break the ability to have custom impls of `StoredValue`. The other workaround is to -/// implement it directly in the macro. 
-/// -/// This form has the advantage that two additional types are provides, `Created` and `Removed`, -/// which are both generic events that can be tied to handlers to do something in the case of being -/// about to create an account where one didn't previously exist (at all; not just where it used to -/// be the default value), or where the account is being removed or reset back to the default value -/// where previously it did exist (though may have been in a default state). This works well with -/// system module's `CallOnCreatedAccount` and `CallKillAccount`. -pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); -impl< - S: StorageMap, - L: HandleLifetime, - K: FullCodec, - T: FullCodec + Default, -> StoredMap for StorageMapShim { - fn get(k: &K) -> T { S::get(k) } - fn insert(k: &K, t: T) -> Result<(), StoredMapError> { - if !S::contains_key(&k) { - L::created(k)?; - } - S::insert(k, t); - Ok(()) - } - fn remove(k: &K) -> Result<(), StoredMapError> { - if S::contains_key(&k) { - L::killed(&k)?; - S::remove(k); - } - Ok(()) - } - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { - if !S::contains_key(&k) { - L::created(k)?; - } - Ok(S::mutate(k, f)) - } - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value); - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k)?; - } else if existed && !exists { - L::killed(k)?; - } - Ok(r) - }) - } - fn try_mutate_exists>( - k: &K, - f: impl FnOnce(&mut Option) -> Result, - ) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value)?; - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k).map_err(E::from)?; - } else if existed && !exists { - L::killed(k).map_err(E::from)?; - } - Ok(r) - }) - } -} - -/// Something that can estimate at which block the next session 
rotation will happen (i.e. a new -/// session starts). -/// -/// The accuracy of the estimates is dependent on the specific implementation, but in order to get -/// the best estimate possible these methods should be called throughout the duration of the session -/// (rather than calling once and storing the result). -/// -/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No -/// assumptions are made about the scheduling of the sessions. -pub trait EstimateNextSessionRotation { - /// Return the average length of a session. - /// - /// This may or may not be accurate. - fn average_session_length() -> BlockNumber; - - /// Return an estimate of the current session progress. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); - - /// Return the block number at which the next session rotation is estimated to happen. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_next_session_rotation(now: BlockNumber) -> (Option, Weight); -} - -impl EstimateNextSessionRotation for () { - fn average_session_length() -> BlockNumber { - Zero::zero() - } - - fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } - - fn estimate_next_session_rotation(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } -} - -/// Something that can estimate at which block scheduling of the next session will happen (i.e when -/// we will try to fetch new validators). -/// -/// This only refers to the point when we fetch the next session details and not when we enact them -/// (for enactment there's `EstimateNextSessionRotation`). With `pallet-session` this should be -/// triggered whenever `SessionManager::new_session` is called. 
-/// -/// For example, if we are using a staking module this would be the block when the session module -/// would ask staking what the next validator set will be, as such this must always be implemented -/// by the session module. -pub trait EstimateNextNewSession { - /// Return the average length of a session. - /// - /// This may or may not be accurate. - fn average_session_length() -> BlockNumber; - - /// Return the block number at which the next new session is estimated to happen. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight); -} - -impl EstimateNextNewSession for () { - fn average_session_length() -> BlockNumber { - Zero::zero() - } - - fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } -} - -/// Anything that can have a `::len()` method. -pub trait Len { - /// Return the length of data type. - fn len(&self) -> usize; -} - -impl Len for T where ::IntoIter: ExactSizeIterator { - fn len(&self) -> usize { - self.clone().into_iter().len() - } -} - -/// A trait for querying a single value from a type. -/// -/// It is not required that the value is constant. -pub trait Get { - /// Return the current value. - fn get() -> T; -} - -impl Get for () { - fn get() -> T { T::default() } -} - -/// A trait for querying whether a type can be said to "contain" a value. -pub trait Contains { - /// Return `true` if this "contains" the given value `t`. - fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } - - /// Get a vector of all members in the set, ordered. - fn sorted_members() -> Vec; - - /// Get the number of items in the set. - fn count() -> usize { Self::sorted_members().len() } - - /// Add an item that would satisfy `contains`. It does not make sure any other - /// state is correctly maintained or generated. 
- /// - /// **Should be used for benchmarking only!!!** - #[cfg(feature = "runtime-benchmarks")] - fn add(_t: &T) { unimplemented!() } -} - -/// A trait for querying bound for the length of an implementation of `Contains` -pub trait ContainsLengthBound { - /// Minimum number of elements contained - fn min_len() -> usize; - /// Maximum number of elements contained - fn max_len() -> usize; -} - -/// Handler for when a new account has been created. -#[impl_for_tuples(30)] -pub trait OnNewAccount { - /// A new account `who` has been registered. - fn on_new_account(who: &AccountId); -} - -/// The account with the given id was reaped. -#[impl_for_tuples(30)] -pub trait OnKilledAccount { - /// The account with the given id was reaped. - fn on_killed_account(who: &AccountId); -} - -/// A trait for finding the author of a block header based on the `PreRuntime` digests contained -/// within it. -pub trait FindAuthor { - /// Find the author of a block based on the pre-runtime digests. - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator; -} - -impl FindAuthor for () { - fn find_author<'a, I>(_: I) -> Option - where I: 'a + IntoIterator - { - None - } -} - -/// A trait for verifying the seal of a header and returning the author. -pub trait VerifySeal { - /// Verify a header and return the author, if any. - fn verify_seal(header: &Header) -> Result, &'static str>; -} - -/// Something which can compute and check proofs of -/// a historical key owner and return full identification data of that -/// key owner. -pub trait KeyOwnerProofSystem { - /// The proof of membership itself. - type Proof: Codec; - /// The full identification of a key owner and the stash account. - type IdentificationTuple: Codec; - - /// Prove membership of a key owner in the current block-state. - /// - /// This should typically only be called off-chain, since it may be - /// computationally heavy. 
- /// - /// Returns `Some` iff the key owner referred to by the given `key` is a - /// member of the current set. - fn prove(key: Key) -> Option; - - /// Check a proof of membership on-chain. Return `Some` iff the proof is - /// valid and recent enough to check. - fn check_proof(key: Key, proof: Self::Proof) -> Option; -} - -impl KeyOwnerProofSystem for () { - // The proof and identification tuples is any bottom type to guarantee that the methods of this - // implementation can never be called or return anything other than `None`. - type Proof = crate::Void; - type IdentificationTuple = crate::Void; - - fn prove(_key: Key) -> Option { - None - } - - fn check_proof(_key: Key, _proof: Self::Proof) -> Option { - None - } -} - -/// Handler for when some currency "account" decreased in balance for -/// some reason. -/// -/// The only reason at present for an increase would be for validator rewards, but -/// there may be other reasons in the future or for other chains. -/// -/// Reasons for decreases include: -/// -/// - Someone got slashed. -/// - Someone paid for a transaction to be included. -pub trait OnUnbalanced { - /// Handler for some imbalances. The different imbalances might have different origins or - /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all - /// of them. Infallible. - fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { - Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) - } - - /// Handler for some imbalance. Infallible. - fn on_unbalanced(amount: Imbalance) { - amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) - } - - /// Actually handle a non-zero imbalance. You probably want to implement this rather than - /// `on_unbalanced`. - fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } -} - -impl OnUnbalanced for () {} - -/// Simple boolean for whether an account needs to be kept in existence. 
-#[derive(Copy, Clone, Eq, PartialEq)] -pub enum ExistenceRequirement { - /// Operation must not result in the account going out of existence. - /// - /// Note this implies that if the account never existed in the first place, then the operation - /// may legitimately leave the account unchanged and still non-existent. - KeepAlive, - /// Operation may result in account going out of existence. - AllowDeath, -} - -/// A type for which some values make sense to be able to drop without further consideration. -pub trait TryDrop: Sized { - /// Drop an instance cleanly. Only works if its value represents "no-operation". - fn try_drop(self) -> Result<(), Self>; -} - -/// A trait for a not-quite Linear Type that tracks an imbalance. -/// -/// Functions that alter account balances return an object of this trait to -/// express how much account balances have been altered in aggregate. If -/// dropped, the currency system will take some default steps to deal with -/// the imbalance (`balances` module simply reduces or increases its -/// total issuance). Your module should generally handle it in some way, -/// good practice is to do so in a configurable manner using an -/// `OnUnbalanced` type for each situation in which your module needs to -/// handle an imbalance. -/// -/// Imbalances can either be Positive (funds were added somewhere without -/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted -/// somewhere without an equal and opposite addition - e.g. a slash or -/// system fee payment). -/// -/// Since they are unsigned, the actual type is always Positive or Negative. -/// The trait makes no distinction except to define the `Opposite` type. -/// -/// New instances of zero value can be created (`zero`) and destroyed -/// (`drop_zero`). -/// -/// Existing instances can be `split` and merged either consuming `self` with -/// `merge` or mutating `self` with `subsume`. 
If the target is an `Option`, -/// then `maybe_merge` and `maybe_subsume` might work better. Instances can -/// also be `offset` with an `Opposite` that is less than or equal to in value. -/// -/// You can always retrieve the raw balance value using `peek`. -#[must_use] -pub trait Imbalance: Sized + TryDrop { - /// The oppositely imbalanced type. They come in pairs. - type Opposite: Imbalance; - - /// The zero imbalance. Can be destroyed with `drop_zero`. - fn zero() -> Self; - - /// Drop an instance cleanly. Only works if its `self.value()` is zero. - fn drop_zero(self) -> Result<(), Self>; - - /// Consume `self` and return two independent instances; the first - /// is guaranteed to be at most `amount` and the second will be the remainder. - fn split(self, amount: Balance) -> (Self, Self); - - /// Consume `self` and return two independent instances; the amounts returned will be in - /// approximately the same ratio as `first`:`second`. - /// - /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should - /// fit into a `u32`. Overflow will safely saturate in both cases. - fn ration(self, first: u32, second: u32) -> (Self, Self) - where Balance: From + Saturating + Div - { - let total: u32 = first.saturating_add(second); - let amount1 = self.peek().saturating_mul(first.into()) / total.into(); - self.split(amount1) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { - let (a, b) = self.split(amount); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. 
- fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise into two pre-existing Imbalance refs. - /// - /// A convenient replacement for `split` and `subsume`. - fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { - let (a, b) = self.split(amount); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - fn merge(self, other: Self) -> Self; - - /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with - /// reversed arguments. - fn merge_into(self, other: &mut Self) { - other.subsume(self) - } - - /// Consume `self` and maybe an `other` to return a new instance that combines - /// both. - fn maybe_merge(self, other: Option) -> Self { - if let Some(o) = other { - self.merge(o) - } else { - self - } - } - - /// Consume an `other` to mutate `self` into a new instance that combines - /// both. - fn subsume(&mut self, other: Self); - - /// Maybe consume an `other` to mutate `self` into a new instance that combines - /// both. - fn maybe_subsume(&mut self, other: Option) { - if let Some(o) = other { - self.subsume(o) - } - } - - /// Consume self and along with an opposite counterpart to return - /// a combined result. 
- /// - /// Returns `Ok` along with a new instance of `Self` if this instance has a - /// greater value than the `other`. Otherwise returns `Err` with an instance of - /// the `Opposite`. In both cases the value represents the combination of `self` - /// and `other`. - fn offset(self, other: Self::Opposite) -> Result; - - /// The raw value of self. - fn peek(&self) -> Balance; -} - -/// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ - /// A positive imbalance (funds have been created but none destroyed). - Positive(P), - /// A negative imbalance (funds have been destroyed but none created). - Negative(P::Opposite), -} - -impl< - P: Imbalance, - N: Imbalance, - B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, -> SignedImbalance { - pub fn zero() -> Self { - SignedImbalance::Positive(P::zero()) - } - - pub fn drop_zero(self) -> Result<(), Self> { - match self { - SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), - SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), - } - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - pub fn merge(self, other: Self) -> Self { - match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), - (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => - if one.peek() > other.peek() { - SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) - } else { - SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) - }, - (one, other) => other.merge(one), - } - } -} - -/// Split an unbalanced amount two ways between a common divisor. 
-pub struct SplitTwoWays< - Balance, - Imbalance, - Part1, - Target1, - Part2, - Target2, ->(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); - -impl< - Balance: From + Saturating + Div, - I: Imbalance, - Part1: U32, - Target1: OnUnbalanced, - Part2: U32, - Target2: OnUnbalanced, -> OnUnbalanced for SplitTwoWays -{ - fn on_nonzero_unbalanced(amount: I) { - let total: u32 = Part1::VALUE + Part2::VALUE; - let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); - let (imb1, imb2) = amount.split(amount1); - Target1::on_unbalanced(imb1); - Target2::on_unbalanced(imb2); - } -} - -/// Abstraction over a fungible assets system. -pub trait Currency { - /// The balance of an account. - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + - Default; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; - - // PUBLIC IMMUTABLES - - /// The combined balance of `who`. - fn total_balance(who: &AccountId) -> Self::Balance; - - /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no - /// balance changes in the meantime and only the reserved balance is not taken into account. - fn can_slash(who: &AccountId, value: Self::Balance) -> bool; - - /// The total amount of issuance in the system. - fn total_issuance() -> Self::Balance; - - /// The minimum balance any single account may have. This is equivalent to the `Balances` module's - /// `ExistentialDeposit`. - fn minimum_balance() -> Self::Balance; - - /// Reduce the total issuance by `amount` and return the according imbalance. 
The imbalance will - /// typically be used to reduce an account by the same amount with e.g. `settle`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example - /// in the case of underflow. - fn burn(amount: Self::Balance) -> Self::PositiveImbalance; - - /// Increase the total issuance by `amount` and return the according imbalance. The imbalance - /// will typically be used to increase an account by the same amount with e.g. - /// `resolve_into_existing` or `resolve_creating`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example - /// in the case of overflow. - fn issue(amount: Self::Balance) -> Self::NegativeImbalance; - - /// Produce a pair of imbalances that cancel each other out exactly. - /// - /// This is just the same as burning and issuing the same amount and has no effect on the - /// total issuance. - fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { - (Self::burn(amount.clone()), Self::issue(amount)) - } - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It alone - /// is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. - /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn free_balance(who: &AccountId) -> Self::Balance; - - /// Returns `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. Basically, it's just a dry-run of `withdraw`. - /// - /// `Err(...)` with the reason why not otherwise. 
- fn ensure_can_withdraw( - who: &AccountId, - _amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult; - - // PUBLIC MUTABLES (DANGEROUS) - - /// Transfer some liquid free balance to another staker. - /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. - fn transfer( - source: &AccountId, - dest: &AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult; - - /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// Mints `value` to the free balance of `who`. - /// - /// If `who` doesn't exist, nothing is done and an Err returned. - fn deposit_into_existing( - who: &AccountId, - value: Self::Balance - ) -> result::Result; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. - fn resolve_into_existing( - who: &AccountId, - value: Self::NegativeImbalance, - ) -> result::Result<(), Self::NegativeImbalance> { - let v = value.peek(); - match Self::deposit_into_existing(who, v) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. - /// - /// Infallible. - fn deposit_creating( - who: &AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. 
- fn resolve_creating( - who: &AccountId, - value: Self::NegativeImbalance, - ) { - let v = value.peek(); - drop(value.offset(Self::deposit_creating(who, v))); - } - - /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is - /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. - /// - /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, - /// then it returns `Err`. - /// - /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value - /// is `value`. - fn withdraw( - who: &AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result; - - /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. - fn settle( - who: &AccountId, - value: Self::PositiveImbalance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result<(), Self::PositiveImbalance> { - let v = value.peek(); - match Self::withdraw(who, v, reasons, liveness) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Ensure an account's free balance equals some value; this will create the account - /// if needed. - /// - /// Returns a signed imbalance and status to indicate if the account was successfully updated or update - /// has led to killing of the account. - fn make_free_balance_be( - who: &AccountId, - balance: Self::Balance, - ) -> SignedImbalance; -} - -/// Status of funds. -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub enum BalanceStatus { - /// Funds are free, as corresponding to `free` item in Balances. - Free, - /// Funds are reserved, as corresponding to `reserved` item in Balances. - Reserved, -} - -/// A currency where funds can be reserved from the user. 
-pub trait ReservableCurrency: Currency { - /// Same result as `reserve(who, value)` (but without the side-effects) assuming there - /// are no balance changes in the meantime. - fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; - - /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. - /// - /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` - /// is less than `value`, then a non-zero second item will be returned. - fn slash_reserved( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// The amount of the balance of a given account that is externally reserved; this can still get - /// slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// - /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn reserved_balance(who: &AccountId) -> Self::Balance; - - /// Moves `value` from balance to reserved balance. - /// - /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will - /// be returned to notify of this. This is different behavior than `unreserve`. - fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; - - /// Moves up to `value` from reserved balance to free balance. This function cannot fail. - /// - /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` - /// is less than `value`, then the remaining amount will be returned. - /// - /// # NOTES - /// - /// - This is different from `reserve`. 
- /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will - /// invoke `on_reserved_too_low` and could reap the account. - fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; - - /// Moves up to `value` from reserved balance of account `slashed` to balance of account - /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be - /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, - /// depending on the `status`. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then `Ok(non_zero)` will be returned. - fn repatriate_reserved( - slashed: &AccountId, - beneficiary: &AccountId, - value: Self::Balance, - status: BalanceStatus, - ) -> result::Result; -} - -/// An identifier for a lock. Used for disambiguating different locks so that -/// they can be individually replaced or removed. -pub type LockIdentifier = [u8; 8]; - -/// A currency whose accounts can have liquidity restrictions. -pub trait LockableCurrency: Currency { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The maximum number of locks a user should have on their account. - type MaxLocks: Get; - - /// Create a new balance lock on account `who`. - /// - /// If the new lock is valid (i.e. not already expired), it will push the struct to - /// the `Locks` vec in storage. Note that you can lock more funds than a user has. - /// - /// If the lock `id` already exists, this will update it. - fn set_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all - /// parameters or creates a new one if it does not exist. 
- /// - /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it - /// applies the most severe constraints of the two, while `set_lock` replaces the lock - /// with the new parameters. As in, `extend_lock` will set: - /// - maximum `amount` - /// - bitwise mask of all `reasons` - fn extend_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Remove an existing lock. - fn remove_lock( - id: LockIdentifier, - who: &AccountId, - ); -} - -/// A vesting schedule over a currency. This allows a particular currency to have vesting limits -/// applied to it. -pub trait VestingSchedule { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The currency that this schedule applies to. - type Currency: Currency; - - /// Get the amount that is currently being vested and cannot be transferred out of this account. - /// Returns `None` if the account has no vesting schedule. - fn vesting_balance(who: &AccountId) -> Option<>::Balance>; - - /// Adds a vesting schedule to a given account. - /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. - /// - /// Is a no-op if the amount to be vested is zero. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn add_vesting_schedule( - who: &AccountId, - locked: >::Balance, - per_block: >::Balance, - starting_block: Self::Moment, - ) -> DispatchResult; - - /// Remove a vesting schedule for a given account. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn remove_vesting_schedule(who: &AccountId); -} - -bitflags! { - /// Reasons for moving funds out of an account. - #[derive(Encode, Decode)] - pub struct WithdrawReasons: i8 { - /// In order to pay for (system) transaction costs. - const TRANSACTION_PAYMENT = 0b00000001; - /// In order to transfer ownership. 
- const TRANSFER = 0b00000010; - /// In order to reserve some funds for a later return or repatriation. - const RESERVE = 0b00000100; - /// In order to pay some other (higher-level) fees. - const FEE = 0b00001000; - /// In order to tip a validator for transaction inclusion. - const TIP = 0b00010000; - } -} - -impl WithdrawReasons { - /// Choose all variants except for `one`. - /// - /// ```rust - /// # use frame_support::traits::WithdrawReasons; - /// # fn main() { - /// assert_eq!( - /// WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP, - /// WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), - /// ); - /// # } - /// ``` - pub fn except(one: WithdrawReasons) -> WithdrawReasons { - let mut flags = Self::all(); - flags.toggle(one); - flags - } -} - -pub trait Time { - type Moment: AtLeast32Bit + Parameter + Default + Copy; - - fn now() -> Self::Moment; -} - -/// Trait to deal with unix time. -pub trait UnixTime { - /// Return duration since `SystemTime::UNIX_EPOCH`. - fn now() -> core::time::Duration; -} - -/// Trait for type that can handle incremental changes to a set of account IDs. -pub trait ChangeMembers { - /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The - /// new set is given by `new`, and need not be sorted. - /// - /// This resets any previous value of prime. - fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { - new.sort(); - Self::change_members_sorted(incoming, outgoing, &new[..]); - } - - /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The - /// new set is thus given by `sorted_new` and **must be sorted**. - /// - /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. - /// - /// This resets any previous value of prime. 
- fn change_members_sorted( - incoming: &[AccountId], - outgoing: &[AccountId], - sorted_new: &[AccountId], - ); - - /// Set the new members; they **must already be sorted**. This will compute the diff and use it to - /// call `change_members_sorted`. - /// - /// This resets any previous value of prime. - fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { - let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members); - Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); - } - - /// Compute diff between new and old members; they **must already be sorted**. - /// - /// Returns incoming and outgoing members. - fn compute_members_diff_sorted( - new_members: &[AccountId], - old_members: &[AccountId], - ) -> (Vec, Vec) { - let mut old_iter = old_members.iter(); - let mut new_iter = new_members.iter(); - let mut incoming = Vec::new(); - let mut outgoing = Vec::new(); - let mut old_i = old_iter.next(); - let mut new_i = new_iter.next(); - loop { - match (old_i, new_i) { - (None, None) => break, - (Some(old), Some(new)) if old == new => { - old_i = old_iter.next(); - new_i = new_iter.next(); - } - (Some(old), Some(new)) if old < new => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (Some(old), None) => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (_, Some(new)) => { - incoming.push(new.clone()); - new_i = new_iter.next(); - } - } - } - (incoming, outgoing) - } - - /// Set the prime member. - fn set_prime(_prime: Option) {} - - /// Get the current prime. - fn get_prime() -> Option { - None - } -} - -impl ChangeMembers for () { - fn change_members(_: &[T], _: &[T], _: Vec) {} - fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} - fn set_members_sorted(_: &[T], _: &[T]) {} - fn set_prime(_: Option) {} -} - -/// Trait for type that can handle the initialization of account IDs at genesis. 
-pub trait InitializeMembers { - /// Initialize the members to the given `members`. - fn initialize_members(members: &[AccountId]); -} - -impl InitializeMembers for () { - fn initialize_members(_: &[T]) {} -} - -/// A trait that is able to provide randomness. -/// -/// Being a deterministic blockchain, real randomness is difficult to come by, different -/// implementations of this trait will provide different security guarantees. At best, -/// this will be randomness which was hard to predict a long time ago, but that has become -/// easy to predict recently. -pub trait Randomness { - /// Get the most recently determined random seed, along with the time in the past - /// since when it was determinable by chain observers. - /// - /// `subject` is a context identifier and allows you to get a different result to - /// other callers of this function; use it like `random(&b"my context"[..])`. - /// - /// NOTE: The returned seed should only be used to distinguish commitments made before - /// the returned block number. If the block number is too early (i.e. commitments were - /// made afterwards), then ensure no further commitments may be made and repeatedly - /// call this on later blocks until the block number returned is later than the latest - /// commitment. - fn random(subject: &[u8]) -> (Output, BlockNumber); - - /// Get the basic random seed. - /// - /// In general you won't want to use this, but rather `Self::random` which allows - /// you to give a subject for the random result and whose value will be - /// independently low-influence random from any other such seeds. - /// - /// NOTE: The returned seed should only be used to distinguish commitments made before - /// the returned block number. If the block number is too early (i.e. commitments were - /// made afterwards), then ensure no further commitments may be made and repeatedly - /// call this on later blocks until the block number returned is later than the latest - /// commitment. 
- fn random_seed() -> (Output, BlockNumber) { - Self::random(&[][..]) - } -} - -/// Trait to be used by block producing consensus engine modules to determine -/// how late the current block is (e.g. in a slot-based proposal mechanism how -/// many slots were skipped since the previous block). -pub trait Lateness { - /// Returns a generic measure of how late the current block is compared to - /// its parent. - fn lateness(&self) -> N; -} - -impl Lateness for () { - fn lateness(&self) -> N { - Zero::zero() - } -} - -/// Implementors of this trait provide information about whether or not some validator has -/// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. -pub trait ValidatorRegistration { - /// Returns true if the provided validator ID has been registered with the implementing runtime - /// module - fn is_registered(id: &ValidatorId) -> bool; -} - -/// Provides information about the pallet setup in the runtime. -/// -/// An implementor should be able to provide information about each pallet that -/// is configured in `construct_runtime!`. -pub trait PalletInfo { - /// Convert the given pallet `P` into its index as configured in the runtime. - fn index() -> Option; - /// Convert the given pallet `P` into its name as configured in the runtime. - fn name() -> Option<&'static str>; -} - -/// The function and pallet name of the Call. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] -pub struct CallMetadata { - /// Name of the function. - pub function_name: &'static str, - /// Name of the pallet to which the function belongs. - pub pallet_name: &'static str, -} - -/// Gets the function name of the Call. -pub trait GetCallName { - /// Return all function names. - fn get_call_names() -> &'static [&'static str]; - /// Return the function name of the Call. - fn get_call_name(&self) -> &'static str; -} - -/// Gets the metadata for the Call - function name and pallet name. 
-pub trait GetCallMetadata { - /// Return all module names. - fn get_module_names() -> &'static [&'static str]; - /// Return all function names for the given `module`. - fn get_call_names(module: &str) -> &'static [&'static str]; - /// Return a [`CallMetadata`], containing function and pallet name of the Call. - fn get_call_metadata(&self) -> CallMetadata; -} - -/// The block finalization trait. -/// -/// Implementing this lets you express what should happen for your pallet when the block is ending. -#[impl_for_tuples(30)] -pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - /// - /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, - /// including inherent extrinsics. - fn on_finalize(_n: BlockNumber) {} -} - -/// The block's on idle trait. -/// -/// Implementing this lets you express what should happen for your pallet before -/// block finalization (see `on_finalize` hook) in case any remaining weight is left. -pub trait OnIdle { - /// The block is being finalized. - /// Implement to have something happen in case there is leftover weight. - /// Check the passed `remaining_weight` to make sure it is high enough to allow for - /// your pallet's extra computation. - /// - /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - - /// in a block are applied but before `on_finalize` is executed. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight - ) -> crate::weights::Weight { - 0 - } -} - -#[impl_for_tuples(30)] -impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( - let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); - weight = weight.saturating_add(Tuple::on_idle(n.clone(), adjusted_remaining_weight)); - )* ); - weight - } -} - -/// The block initialization trait. 
-/// -/// Implementing this lets you express what should happen for your pallet when the block is -/// beginning (right before the first extrinsic is executed). -pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - /// - /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, - /// including inherent extrinsics. Hence for instance, if you runtime includes - /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } -} - -#[impl_for_tuples(30)] -impl OnInitialize for Tuple { - fn on_initialize(n: BlockNumber) -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); - weight - } -} - -/// A trait that will be called at genesis. -/// -/// Implementing this trait for a pallet let's you express operations that should -/// happen at genesis. It will be called in an externalities provided environment and -/// will see the genesis state after all pallets have written their genesis state. -#[impl_for_tuples(30)] -pub trait OnGenesis { - /// Something that should happen at genesis. - fn on_genesis() {} -} - -/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. -#[cfg(feature = "try-runtime")] -pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; - -/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. -#[cfg(feature = "try-runtime")] -pub trait OnRuntimeUpgradeHelpersExt { - /// Generate a storage key unique to this runtime upgrade. - /// - /// This can be used to communicate data from pre-upgrade to post-upgrade state and check - /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. 
- #[cfg(feature = "try-runtime")] - fn storage_key(ident: &str) -> [u8; 32] { - let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); - let ident = sp_io::hashing::twox_128(ident.as_bytes()); +mod validation; +pub use validation::{ + ValidatorSet, ValidatorSetWithIdentification, OneSessionHandler, FindAuthor, VerifySeal, + EstimateNextNewSession, EstimateNextSessionRotation, KeyOwnerProofSystem, ValidatorRegistration, + Lateness, +}; - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&prefix); - final_key[16..].copy_from_slice(&ident); +mod filter; +pub use filter::{ + Filter, FilterStack, FilterStackGuard, ClearFilterGuard, InstanceFilter, IntegrityTest, +}; - final_key - } +mod misc; +pub use misc::{ + Len, Get, GetDefault, HandleLifetime, TryDrop, Time, UnixTime, IsType, IsSubType, ExecuteBlock, + SameOrOther, OnNewAccount, OnKilledAccount, OffchainWorker, +}; - /// Get temporary storage data written by [`Self::set_temp_storage`]. - /// - /// Returns `None` if either the data is unavailable or un-decodable. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being read from. - #[cfg(feature = "try-runtime")] - fn get_temp_storage(at: &str) -> Option { - sp_io::storage::get(&Self::storage_key(at)) - .and_then(|bytes| Decode::decode(&mut &*bytes).ok()) - } +mod stored_map; +pub use stored_map::{StoredMap, StorageMapShim}; +mod randomness; +pub use randomness::Randomness; - /// Write some temporary data to a specific storage that can be read (potentially in - /// post-upgrade hook) via [`Self::get_temp_storage`]. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being written - /// to. 
- #[cfg(feature = "try-runtime")] - fn set_temp_storage(data: T, at: &str) { - sp_io::storage::set(&Self::storage_key(at), &data.encode()); - } -} +mod metadata; +pub use metadata::{ + CallMetadata, GetCallMetadata, GetCallName, PalletInfo, PalletVersion, GetPalletVersion, + PALLET_VERSION_STORAGE_KEY_POSTFIX, +}; +mod hooks; +pub use hooks::{Hooks, OnGenesis, OnInitialize, OnFinalize, OnIdle, OnRuntimeUpgrade, OnTimestampSet}; #[cfg(feature = "try-runtime")] -impl OnRuntimeUpgradeHelpersExt for U {} - -/// The runtime upgrade trait. -/// -/// Implementing this lets you express what should happen when the runtime upgrades, -/// and changes may need to occur to your module. -pub trait OnRuntimeUpgrade { - /// Perform a module upgrade. - /// - /// # Warning - /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other - /// block local data are not accessible. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 - } - - /// Execute some pre-checks prior to a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { Ok(()) } - - /// Execute some post-checks after a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
- #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { Ok(()) } -} - -#[impl_for_tuples(30)] -impl OnRuntimeUpgrade for Tuple { - fn on_runtime_upgrade() -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); - weight - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); - result - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); - result - } -} - -/// Off-chain computation trait. -/// -/// Implementing this trait on a module allows you to perform long-running tasks -/// that make (by default) validators generate transactions that feed results -/// of those long-running computations back on chain. -/// -/// NOTE: This function runs off-chain, so it can access the block state, -/// but cannot preform any alterations. More specifically alterations are -/// not forbidden, but they are not persisted in any way after the worker -/// has finished. -#[impl_for_tuples(30)] -pub trait OffchainWorker { - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. - fn offchain_worker(_n: BlockNumber) {} -} - -pub mod schedule { - use super::*; - - /// Information relating to the period of a scheduled task. First item is the length of the - /// period and the second is the number of times it should be executed in total before the task - /// is considered finished and removed. 
- pub type Period = (BlockNumber, u32); - - /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning - /// higher priority. - pub type Priority = u8; - - /// The dispatch time of a scheduled task. - #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] - pub enum DispatchTime { - /// At specified block. - At(BlockNumber), - /// After specified number of blocks. - After(BlockNumber), - } - - /// The highest priority. We invert the value so that normal sorting will place the highest - /// priority at the beginning of the list. - pub const HIGHEST_PRIORITY: Priority = 0; - /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even - /// if it breaches the `MaximumWeight` limitation. - pub const HARD_DEADLINE: Priority = 63; - /// The lowest priority. Most stuff should be around here. - pub const LOWEST_PRIORITY: Priority = 255; - - /// A type that can be used as a scheduler. - pub trait Anon { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + Debug; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// This is not named. - fn schedule( - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Call - ) -> Result; - - /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, - /// also. - /// - /// Will return an error if the `address` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - /// - /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For - /// that, you must name the task explicitly using the `Named` trait. - fn cancel(address: Self::Address) -> Result<(), ()>; - - /// Reschedule a task. 
For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. For periodic tasks, - /// this dispatch is guaranteed to succeed only before the *initial* execution; for - /// others, use `reschedule_named`. - /// - /// Will return an error if the `address` is invalid. - fn reschedule( - address: Self::Address, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an error if the `address` is invalid. - fn next_dispatch_time(address: Self::Address) -> Result; - } - - /// A type that can be used as a scheduler. - pub trait Named { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// - `id`: The identity of the task. This must be unique and will return an error if not. - fn schedule_named( - id: Vec, - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Call - ) -> Result; - - /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances - /// of that, also. - /// - /// Will return an error if the `id` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - fn cancel_named(id: Vec) -> Result<(), ()>; - - /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. - fn reschedule_named( - id: Vec, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an error if the `id` is invalid. - fn next_dispatch_time(id: Vec) -> Result; - } -} - -/// Some sort of check on the origin is performed by this object. 
-pub trait EnsureOrigin { - /// A return type. - type Success; - /// Perform the origin check. - fn ensure_origin(o: OuterOrigin) -> result::Result { - Self::try_origin(o).map_err(|_| BadOrigin) - } - /// Perform the origin check. - fn try_origin(o: OuterOrigin) -> result::Result; - - /// Returns an outer origin capable of passing `try_origin` check. - /// - /// ** Should be used for benchmarking only!!! ** - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> OuterOrigin; -} - -/// Type that can be dispatched with an origin but without checking the origin filter. -/// -/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by -/// `construct_runtime` and `impl_outer_dispatch`. -pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). - type Origin; - - /// Dispatch this call but do not check the filter in origin. - fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo; -} - -/// Methods available on `frame_system::Config::Origin`. -pub trait OriginTrait: Sized { - /// Runtime call type, as in `frame_system::Config::Call` - type Call; - - /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin; - - /// The AccountId used across the system. - type AccountId; - - /// Add a filter to the origin. - fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); - - /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. - fn reset_filter(&mut self); - - /// Replace the caller with caller from the other origin - fn set_caller_from(&mut self, other: impl Into); - - /// Filter the call, if false then call is filtered out. - fn filter_call(&self, call: &Self::Call) -> bool; - - /// Get the caller. - fn caller(&self) -> &Self::PalletsOrigin; - - /// Create with system none origin and `frame-system::Config::BaseCallFilter`. 
- fn none() -> Self; - - /// Create with system root origin and no filter. - fn root() -> Self; - - /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. - fn signed(by: Self::AccountId) -> Self; -} - -/// Trait to be used when types are exactly same. -/// -/// This allow to convert back and forth from type, a reference and a mutable reference. -pub trait IsType: Into + From { - /// Cast reference. - fn from_ref(t: &T) -> &Self; - - /// Cast reference. - fn into_ref(&self) -> &T; - - /// Cast mutable reference. - fn from_mut(t: &mut T) -> &mut Self; - - /// Cast mutable reference. - fn into_mut(&mut self) -> &mut T; -} - -impl IsType for T { - fn from_ref(t: &T) -> &Self { t } - fn into_ref(&self) -> &T { self } - fn from_mut(t: &mut T) -> &mut Self { t } - fn into_mut(&mut self) -> &mut T { self } -} - -/// An instance of a pallet in the storage. -/// -/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! -/// -/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances -/// "InstanceNMyModule". -pub trait Instance: 'static { - /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" - const PREFIX: &'static str; -} - -/// An instance of a storage in a pallet. -/// -/// Define an instance for an individual storage inside a pallet. -/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is -/// used to isolate storages inside a pallet. -/// -/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which -/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` -pub trait StorageInstance { - /// Prefix of a pallet to isolate it from other pallets. - fn pallet_prefix() -> &'static str; - - /// Prefix given to a storage to isolate from other storages in the pallet. 
- const STORAGE_PREFIX: &'static str; -} - -/// Implement Get by returning Default for any type that implements Default. -pub struct GetDefault; -impl crate::traits::Get for GetDefault { - fn get() -> T { - T::default() - } -} - -/// A trait similar to `Convert` to convert values from `B` an abstract balance type -/// into u64 and back from u128. (This conversion is used in election and other places where complex -/// calculation over balance type is needed) -/// -/// Total issuance of the currency is passed in, but an implementation of this trait may or may not -/// use it. -/// -/// # WARNING -/// -/// the total issuance being passed in implies that the implementation must be aware of the fact -/// that its values can affect the outcome. This implies that if the vote value is dependent on the -/// total issuance, it should never ber written to storage for later re-use. -pub trait CurrencyToVote { - /// Convert balance to u64. - fn to_vote(value: B, issuance: B) -> u64; - - /// Convert u128 to balance. - fn to_currency(value: u128, issuance: B) -> B; -} - -/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. -/// -/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the -/// important cases: -/// -/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that -/// the factor will not have any effect. In this case, any account's balance is also less. Thus, -/// both of the conversions are basically an `as`; Any balance can fit in u64. -/// -/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and -/// divided upon conversion. 
-pub struct U128CurrencyToVote; - -impl U128CurrencyToVote { - fn factor(issuance: u128) -> u128 { - (issuance / u64::max_value() as u128).max(1) - } -} - -impl CurrencyToVote for U128CurrencyToVote { - fn to_vote(value: u128, issuance: u128) -> u64 { - (value / Self::factor(issuance)).saturated_into() - } - - fn to_currency(value: u128, issuance: u128) -> u128 { - value.saturating_mul(Self::factor(issuance)) - } -} - - -/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. -/// -/// # Warning -/// -/// This is designed to be used mostly for testing. Use with care, and think about the consequences. -pub struct SaturatingCurrencyToVote; - -impl + UniqueSaturatedFrom> CurrencyToVote for SaturatingCurrencyToVote { - fn to_vote(value: B, _: B) -> u64 { - value.unique_saturated_into() - } - - fn to_currency(value: u128, _: B) -> B { - B::unique_saturated_from(value) - } -} - -/// Something that can be checked to be a of sub type `T`. -/// -/// This is useful for enums where each variant encapsulates a different sub type, and -/// you need access to these sub types. -/// -/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this -/// to check if a certain call is an instance of the local pallet's `Call` enum. 
-/// -/// # Example -/// -/// ``` -/// # use frame_support::traits::IsSubType; -/// -/// enum Test { -/// String(String), -/// U32(u32), -/// } -/// -/// impl IsSubType for Test { -/// fn is_sub_type(&self) -> Option<&String> { -/// match self { -/// Self::String(ref r) => Some(r), -/// _ => None, -/// } -/// } -/// } -/// -/// impl IsSubType for Test { -/// fn is_sub_type(&self) -> Option<&u32> { -/// match self { -/// Self::U32(ref r) => Some(r), -/// _ => None, -/// } -/// } -/// } -/// -/// fn main() { -/// let data = Test::String("test".into()); -/// -/// assert_eq!("test", IsSubType::::is_sub_type(&data).unwrap().as_str()); -/// } -/// ``` -pub trait IsSubType { - /// Returns `Some(_)` if `self` is an instance of sub type `T`. - fn is_sub_type(&self) -> Option<&T>; -} - -/// The pallet hooks trait. Implementing this lets you express some logic to execute. -pub trait Hooks { - /// The block is being finalized. Implement to have something happen. - fn on_finalize(_n: BlockNumber) {} - - /// This will be run when the block is being finalized (before `on_finalize`). - /// Implement to have something happen using the remaining weight. - /// Will not fire if the remaining weight is 0. - /// Return the weight used, the hook will subtract it from current weight used - /// and pass the result to the next `on_idle` hook if it exists. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight - ) -> crate::weights::Weight { - 0 - } - - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } - - /// Perform a module upgrade. - /// - /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it - /// doesn't include the write of the pallet version in storage. 
The final complete logic - /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by - /// `Pallet`. - /// - /// # Warning - /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other - /// block local data are not accessible. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { 0 } - - /// Execute some pre-checks prior to a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) - } - - /// Execute some post-checks after a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - Ok(()) - } - - /// Implementing this function on a module allows you to perform long-running tasks - /// that make (by default) validators generate transactions that feed results - /// of those long-running computations back on chain. - /// - /// NOTE: This function runs off-chain, so it can access the block state, - /// but cannot preform any alterations. More specifically alterations are - /// not forbidden, but they are not persisted in any way after the worker - /// has finished. - /// - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. - fn offchain_worker(_n: BlockNumber) {} - - /// Run integrity test. 
- /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - -/// A trait to define the build function of a genesis config, T and I are placeholder for pallet -/// trait and pallet instance. +pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; #[cfg(feature = "std")] -pub trait GenesisBuild: Default + MaybeSerializeDeserialize { - /// The build function is called within an externalities allowing storage APIs. - /// Thus one can write to storage using regular pallet storages. - fn build(&self); - - /// Build the storage using `build` inside default storage. - fn build_storage(&self) -> Result { - let mut storage = Default::default(); - self.assimilate_storage(&mut storage)?; - Ok(storage) - } - - /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { - sp_state_machine::BasicExternalities::execute_with_storage(storage, || { - self.build(); - Ok(()) - }) - } -} - -/// The storage key postfix that is used to store the [`PalletVersion`] per pallet. -/// -/// The full storage key is built by using: -/// Twox128([`PalletInfo::name`]) ++ Twox128([`PALLET_VERSION_STORAGE_KEY_POSTFIX`]) -pub const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; - -/// The version of a pallet. -/// -/// Each pallet version is stored in the state under a fixed key. See -/// [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. -#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy)] -pub struct PalletVersion { - /// The major version of the pallet. - pub major: u16, - /// The minor version of the pallet. - pub minor: u8, - /// The patch version of the pallet. - pub patch: u8, -} - -impl PalletVersion { - /// Creates a new instance of `Self`. 
- pub fn new(major: u16, minor: u8, patch: u8) -> Self { - Self { - major, - minor, - patch, - } - } - - /// Returns the storage key for a pallet version. - /// - /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. - /// - /// Returns `None` if the given `PI` returned a `None` as name for the given - /// `Pallet`. - pub fn storage_key() -> Option<[u8; 32]> { - let pallet_name = PI::name::()?; - - let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); - let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&pallet_name); - final_key[16..].copy_from_slice(&postfix); - - Some(final_key) - } - - /// Put this pallet version into the storage. - /// - /// It will use the storage key that is associated with the given `Pallet`. - /// - /// # Panics - /// - /// This function will panic iff `Pallet` can not be found by `PalletInfo`. - /// In a runtime that is put together using - /// [`construct_runtime!`](crate::construct_runtime) this should never happen. - /// - /// It will also panic if this function isn't executed in an externalities - /// provided environment. - pub fn put_into_storage(&self) { - let key = Self::storage_key::() - .expect("Every active pallet has a name in the runtime; qed"); - - crate::storage::unhashed::put(&key, self); - } -} - -impl sp_std::cmp::PartialOrd for PalletVersion { - fn partial_cmp(&self, other: &Self) -> Option { - let res = self.major - .cmp(&other.major) - .then_with(|| - self.minor - .cmp(&other.minor) - .then_with(|| self.patch.cmp(&other.patch) - )); - - Some(res) - } -} - -/// Provides version information about a pallet. -/// -/// This trait provides two functions for returning the version of a -/// pallet. There is a state where both functions can return distinct versions. -/// See [`GetPalletVersion::storage_version`] for more information about this. 
-pub trait GetPalletVersion { - /// Returns the current version of the pallet. - fn current_version() -> PalletVersion; - - /// Returns the version of the pallet that is stored in storage. - /// - /// Most of the time this will return the exact same version as - /// [`GetPalletVersion::current_version`]. Only when being in - /// a state after a runtime upgrade happened and the pallet did - /// not yet updated its version in storage, this will return a - /// different(the previous, seen from the time of calling) version. - /// - /// See [`PalletVersion`] for more information. - /// - /// # Note - /// - /// If there was no previous version of the pallet stored in the state, - /// this function returns `None`. - fn storage_version() -> Option; -} - -/// Something that can execute a given block. -/// -/// Executing a block means that all extrinsics in a given block will be executed and the resulting -/// header will be checked against the header of the given block. -pub trait ExecuteBlock { - /// Execute the given `block`. - /// - /// This will execute all extrinsics in the block and check that the resulting header is correct. - /// - /// # Panic - /// - /// Panics when an extrinsics panics or the resulting header doesn't match the expected header. - fn execute_block(block: Block); -} - -/// A trait which is called when the timestamp is set in the runtime. -#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait OnTimestampSet { - /// Called when the timestamp is set. 
- fn on_timestamp_set(moment: Moment); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { - struct Test; - impl OnInitialize for Test { - fn on_initialize(_n: u8) -> crate::weights::Weight { - 10 - } - } - impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> crate::weights::Weight { - 20 - } - } +pub use hooks::GenesisBuild; - assert_eq!(<(Test, Test)>::on_initialize(0), 20); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); - } +pub mod schedule; +mod storage; +pub use storage::{Instance, StorageInstance}; - #[test] - fn check_pallet_version_ordering() { - let version = PalletVersion::new(1, 0, 0); - assert!(version > PalletVersion::new(0, 1, 2)); - assert!(version == PalletVersion::new(1, 0, 0)); - assert!(version < PalletVersion::new(1, 0, 1)); - assert!(version < PalletVersion::new(1, 1, 0)); +mod dispatch; +pub use dispatch::{EnsureOrigin, OriginTrait, UnfilteredDispatchable}; - let version = PalletVersion::new(2, 50, 50); - assert!(version < PalletVersion::new(2, 50, 51)); - assert!(version > PalletVersion::new(2, 49, 51)); - assert!(version < PalletVersion::new(3, 49, 51)); - } -} +mod voting; +pub use voting::{CurrencyToVote, SaturatingCurrencyToVote, U128CurrencyToVote}; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs new file mode 100644 index 0000000000000..29dbaf105a05a --- /dev/null +++ b/frame/support/src/traits/dispatch.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with dispatching calls and the origin from which they are dispatched. + +use crate::dispatch::DispatchResultWithPostInfo; +use sp_runtime::traits::BadOrigin; + +/// Some sort of check on the origin is performed by this object. +pub trait EnsureOrigin { + /// A return type. + type Success; + /// Perform the origin check. + fn ensure_origin(o: OuterOrigin) -> Result { + Self::try_origin(o).map_err(|_| BadOrigin) + } + /// Perform the origin check. + fn try_origin(o: OuterOrigin) -> Result; + + /// Returns an outer origin capable of passing `try_origin` check. + /// + /// ** Should be used for benchmarking only!!! ** + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> OuterOrigin; +} + +/// Type that can be dispatched with an origin but without checking the origin filter. +/// +/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by +/// `construct_runtime` and `impl_outer_dispatch`. +pub trait UnfilteredDispatchable { + /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). + type Origin; + + /// Dispatch this call but do not check the filter in origin. + fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; +} + +/// Methods available on `frame_system::Config::Origin`. +pub trait OriginTrait: Sized { + /// Runtime call type, as in `frame_system::Config::Call` + type Call; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin; + + /// The AccountId used across the system. 
+ type AccountId; + + /// Add a filter to the origin. + fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); + + /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. + fn reset_filter(&mut self); + + /// Replace the caller with caller from the other origin + fn set_caller_from(&mut self, other: impl Into); + + /// Filter the call, if false then call is filtered out. + fn filter_call(&self, call: &Self::Call) -> bool; + + /// Get the caller. + fn caller(&self) -> &Self::PalletsOrigin; + + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + fn none() -> Self; + + /// Create with system root origin and no filter. + fn root() -> Self; + + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + fn signed(by: Self::AccountId) -> Self; +} diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs new file mode 100644 index 0000000000000..f884a8ece72e5 --- /dev/null +++ b/frame/support/src/traits/filter.rs @@ -0,0 +1,282 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated utilities for dealing with abstract constraint filters. + +use sp_std::marker::PhantomData; + +/// Simple trait for providing a filter over a reference to some type. 
+pub trait Filter { + /// Determine if a given value should be allowed through the filter (returns `true`) or not. + fn filter(_: &T) -> bool; +} + +impl Filter for () { + fn filter(_: &T) -> bool { true } +} + +/// Trait to add a constraint onto the filter. +pub trait FilterStack: Filter { + /// The type used to archive the stack. + type Stack; + + /// Add a new `constraint` onto the filter. + fn push(constraint: impl Fn(&T) -> bool + 'static); + + /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. + fn pop(); + + /// Clear the filter, returning a value that may be used later to `restore` it. + fn take() -> Self::Stack; + + /// Restore the filter from a previous `take` operation. + fn restore(taken: Self::Stack); +} + +/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. +pub struct FilterStackGuard, T>(PhantomData<(F, T)>); + +/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when +/// dropped. +pub struct ClearFilterGuard, T>(Option, PhantomData); + +impl, T> FilterStackGuard { + /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when + /// this instance is dropped. + pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { + F::push(constraint); + Self(PhantomData) + } +} + +impl, T> Drop for FilterStackGuard { + fn drop(&mut self) { + F::pop(); + } +} + +impl, T> ClearFilterGuard { + /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when + /// this instance is dropped. + pub fn new() -> Self { + Self(Some(F::take()), PhantomData) + } +} + +impl, T> Drop for ClearFilterGuard { + fn drop(&mut self) { + if let Some(taken) = self.0.take() { + F::restore(taken); + } + } +} + +/// Simple trait for providing a filter over a reference to some type, given an instance of itself. 
+pub trait InstanceFilter: Sized + Send + Sync { + /// Determine if a given value should be allowed through the filter (returns `true`) or not. + fn filter(&self, _: &T) -> bool; + + /// Determines whether `self` matches at least everything that `_o` does. + fn is_superset(&self, _o: &Self) -> bool { false } +} + +impl InstanceFilter for () { + fn filter(&self, _: &T) -> bool { true } + fn is_superset(&self, _o: &Self) -> bool { true } +} + +/// Re-expected for the macro. +#[doc(hidden)] +pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; + +#[macro_export] +macro_rules! impl_filter_stack { + ($target:ty, $base:ty, $call:ty, $module:ident) => { + #[cfg(feature = "std")] + mod $module { + #[allow(unused_imports)] + use super::*; + use $crate::traits::filter::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; + + thread_local! { + static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); + } + + impl Filter<$call> for $target { + fn filter(call: &$call) -> bool { + <$base>::filter(call) && + FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) + } + } + + impl FilterStack<$call> for $target { + type Stack = Vec bool + 'static>>; + fn push(f: impl Fn(&$call) -> bool + 'static) { + FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); + } + fn pop() { + FILTER.with(|filter| filter.borrow_mut().pop()); + } + fn take() -> Self::Stack { + FILTER.with(|filter| take(filter.borrow_mut().as_mut())) + } + fn restore(mut s: Self::Stack) { + FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); + } + } + } + + #[cfg(not(feature = "std"))] + mod $module { + #[allow(unused_imports)] + use super::*; + use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; + + struct ThisFilter(RefCell bool + 'static>>>); + // NOTE: Safe only in wasm (guarded above) because there's only one thread. 
+ unsafe impl Send for ThisFilter {} + unsafe impl Sync for ThisFilter {} + + static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); + + impl Filter<$call> for $target { + fn filter(call: &$call) -> bool { + <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) + } + } + + impl FilterStack<$call> for $target { + type Stack = Vec bool + 'static>>; + fn push(f: impl Fn(&$call) -> bool + 'static) { + FILTER.0.borrow_mut().push(Box::new(f)); + } + fn pop() { + FILTER.0.borrow_mut().pop(); + } + fn take() -> Self::Stack { + take(FILTER.0.borrow_mut().as_mut()) + } + fn restore(mut s: Self::Stack) { + swap(FILTER.0.borrow_mut().as_mut(), &mut s); + } + } + } + } +} + +/// Type that provide some integrity tests. +/// +/// This implemented for modules by `decl_module`. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. + fn integrity_test() {} +} + +#[cfg(test)] +pub mod test_impl_filter_stack { + use super::*; + + pub struct IsCallable; + pub struct BaseFilter; + impl Filter for BaseFilter { + fn filter(x: &u32) -> bool { x % 2 == 0 } + } + impl_filter_stack!( + crate::traits::filter::test_impl_filter_stack::IsCallable, + crate::traits::filter::test_impl_filter_stack::BaseFilter, + u32, + is_callable + ); + + #[test] + fn impl_filter_stack_should_work() { + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + + IsCallable::push(|x| *x < 42); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + IsCallable::push(|x| *x % 3 == 0); + assert!(IsCallable::filter(&36)); + assert!(!IsCallable::filter(&40)); + + IsCallable::pop(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + let saved = IsCallable::take(); + 
assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + + IsCallable::restore(saved); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + IsCallable::pop(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } + + #[test] + fn guards_should_work() { + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + { + let _guard_1 = FilterStackGuard::::new(|x| *x < 42); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + { + let _guard_2 = FilterStackGuard::::new(|x| *x % 3 == 0); + assert!(IsCallable::filter(&36)); + assert!(!IsCallable::filter(&40)); + } + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + { + let _guard_2 = ClearFilterGuard::::new(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + } + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } +} diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs new file mode 100644 index 0000000000000..5f7b35a9ad25c --- /dev/null +++ b/frame/support/src/traits/hooks.rs @@ -0,0 +1,349 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for hooking tasks to events in a blockchain's lifecycle. + +use sp_arithmetic::traits::Saturating; +use sp_runtime::traits::MaybeSerializeDeserialize; +use impl_trait_for_tuples::impl_for_tuples; + +/// The block initialization trait. +/// +/// Implementing this lets you express what should happen for your pallet when the block is +/// beginning (right before the first extrinsic is executed). +pub trait OnInitialize { + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. + /// + /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, + /// including inherent extrinsics. Hence for instance, if you runtime includes + /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } +} + +#[impl_for_tuples(30)] +impl OnInitialize for Tuple { + fn on_initialize(n: BlockNumber) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); + weight + } +} + +/// The block finalization trait. +/// +/// Implementing this lets you express what should happen for your pallet when the block is ending. +#[impl_for_tuples(30)] +pub trait OnFinalize { + /// The block is being finalized. 
Implement to have something happen. + /// + /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, + /// including inherent extrinsics. + fn on_finalize(_n: BlockNumber) {} +} + +/// The block's on idle trait. +/// +/// Implementing this lets you express what should happen for your pallet before +/// block finalization (see `on_finalize` hook) in case any remaining weight is left. +pub trait OnIdle { + /// The block is being finalized. + /// Implement to have something happen in case there is leftover weight. + /// Check the passed `remaining_weight` to make sure it is high enough to allow for + /// your pallet's extra computation. + /// + /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - + /// in a block are applied but before `on_finalize` is executed. + fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight + ) -> crate::weights::Weight { + 0 + } +} + +#[impl_for_tuples(30)] +impl OnIdle for Tuple { + fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( + let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); + weight = weight.saturating_add(Tuple::on_idle(n.clone(), adjusted_remaining_weight)); + )* ); + weight + } +} + +/// A trait that will be called at genesis. +/// +/// Implementing this trait for a pallet let's you express operations that should +/// happen at genesis. It will be called in an externalities provided environment and +/// will see the genesis state after all pallets have written their genesis state. +#[impl_for_tuples(30)] +pub trait OnGenesis { + /// Something that should happen at genesis. + fn on_genesis() {} +} + +/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. 
+#[cfg(feature = "try-runtime")] +pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; + +/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. +#[cfg(feature = "try-runtime")] +pub trait OnRuntimeUpgradeHelpersExt { + /// Generate a storage key unique to this runtime upgrade. + /// + /// This can be used to communicate data from pre-upgrade to post-upgrade state and check + /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. + #[cfg(feature = "try-runtime")] + fn storage_key(ident: &str) -> [u8; 32] { + let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); + let ident = sp_io::hashing::twox_128(ident.as_bytes()); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&prefix); + final_key[16..].copy_from_slice(&ident); + + final_key + } + + /// Get temporary storage data written by [`Self::set_temp_storage`]. + /// + /// Returns `None` if either the data is unavailable or un-decodable. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being read from. + #[cfg(feature = "try-runtime")] + fn get_temp_storage(at: &str) -> Option { + sp_io::storage::get(&Self::storage_key(at)) + .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok()) + } + + /// Write some temporary data to a specific storage that can be read (potentially in + /// post-upgrade hook) via [`Self::get_temp_storage`]. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being written + /// to. + #[cfg(feature = "try-runtime")] + fn set_temp_storage(data: T, at: &str) { + sp_io::storage::set(&Self::storage_key(at), &data.encode()); + } +} + +#[cfg(feature = "try-runtime")] +impl OnRuntimeUpgradeHelpersExt for U {} + +/// The runtime upgrade trait. +/// +/// Implementing this lets you express what should happen when the runtime upgrades, +/// and changes may need to occur to your module. 
+pub trait OnRuntimeUpgrade { + /// Perform a module upgrade. + /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 + } + + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { Ok(()) } + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } +} + +#[impl_for_tuples(30)] +impl OnRuntimeUpgrade for Tuple { + fn on_runtime_upgrade() -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); + result + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); + result + } +} + +/// The pallet hooks trait. Implementing this lets you express some logic to execute. +pub trait Hooks { + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} + + /// This will be run when the block is being finalized (before `on_finalize`). + /// Implement to have something happen using the remaining weight. + /// Will not fire if the remaining weight is 0. 
+ /// Return the weight used, the hook will subtract it from current weight used + /// and pass the result to the next `on_idle` hook if it exists. + fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight + ) -> crate::weights::Weight { + 0 + } + + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + + /// Perform a module upgrade. + /// + /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it + /// doesn't include the write of the pallet version in storage. The final complete logic + /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by + /// `Pallet`. + /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + /// Implementing this function on a module allows you to perform long-running tasks + /// that make (by default) validators generate transactions that feed results + /// of those long-running computations back on chain. 
+ /// + /// NOTE: This function runs off-chain, so it can access the block state, + /// but cannot perform any alterations. More specifically alterations are + /// not forbidden, but they are not persisted in any way after the worker + /// has finished. + /// + /// This function is being called after every block import (when fully synced). + /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} + + /// Run integrity test. + /// + /// The test is not executed in an externalities provided environment. + fn integrity_test() {} +} + +/// A trait to define the build function of a genesis config, T and I are placeholder for pallet +/// trait and pallet instance. +#[cfg(feature = "std")] +pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + /// The build function is called within an externalities allowing storage APIs. + /// Thus one can write to storage using regular pallet storages. + fn build(&self); + + /// Build the storage using `build` inside default storage. + fn build_storage(&self) -> Result { + let mut storage = Default::default(); + self.assimilate_storage(&mut storage)?; + Ok(storage) + } + + /// Assimilate the storage for this module into pre-existing overlays. + fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { + sp_state_machine::BasicExternalities::execute_with_storage(storage, || { + self.build(); + Ok(()) + }) + } +} + +/// A trait which is called when the timestamp is set in the runtime. +#[impl_for_tuples(30)] +pub trait OnTimestampSet { + /// Called when the timestamp is set.
+ fn on_timestamp_set(moment: Moment); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::metadata::PalletVersion; + + #[test] + fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { + struct Test; + impl OnInitialize for Test { + fn on_initialize(_n: u8) -> crate::weights::Weight { + 10 + } + } + impl OnRuntimeUpgrade for Test { + fn on_runtime_upgrade() -> crate::weights::Weight { + 20 + } + } + + assert_eq!(<(Test, Test)>::on_initialize(0), 20); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + } + + #[test] + fn check_pallet_version_ordering() { + let version = PalletVersion::new(1, 0, 0); + assert!(version > PalletVersion::new(0, 1, 2)); + assert!(version == PalletVersion::new(1, 0, 0)); + assert!(version < PalletVersion::new(1, 0, 1)); + assert!(version < PalletVersion::new(1, 1, 0)); + + let version = PalletVersion::new(2, 50, 50); + assert!(version < PalletVersion::new(2, 50, 51)); + assert!(version > PalletVersion::new(2, 49, 51)); + assert!(version < PalletVersion::new(3, 49, 51)); + } +} diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs new file mode 100644 index 0000000000000..d3ce6786af8c1 --- /dev/null +++ b/frame/support/src/traits/members.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Traits for dealing with the idea of membership. + +use sp_std::prelude::*; + +/// A trait for querying whether a type can be said to "contain" a value. +pub trait Contains { + /// Return `true` if this "contains" the given value `t`. + fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } + + /// Get a vector of all members in the set, ordered. + fn sorted_members() -> Vec; + + /// Get the number of items in the set. + fn count() -> usize { Self::sorted_members().len() } + + /// Add an item that would satisfy `contains`. It does not make sure any other + /// state is correctly maintained or generated. + /// + /// **Should be used for benchmarking only!!!** + #[cfg(feature = "runtime-benchmarks")] + fn add(_t: &T) { unimplemented!() } +} + +/// A trait for querying bound for the length of an implementation of `Contains` +pub trait ContainsLengthBound { + /// Minimum number of elements contained + fn min_len() -> usize; + /// Maximum number of elements contained + fn max_len() -> usize; +} + +/// Trait for type that can handle the initialization of account IDs at genesis. +pub trait InitializeMembers { + /// Initialize the members to the given `members`. + fn initialize_members(members: &[AccountId]); +} + +impl InitializeMembers for () { + fn initialize_members(_: &[T]) {} +} + +/// Trait for type that can handle incremental changes to a set of account IDs. +pub trait ChangeMembers { + /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The + /// new set is given by `new`, and need not be sorted. + /// + /// This resets any previous value of prime. + fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { + new.sort(); + Self::change_members_sorted(incoming, outgoing, &new[..]); + } + + /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The + /// new set is thus given by `sorted_new` and **must be sorted**. 
+ /// + /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. + /// + /// This resets any previous value of prime. + fn change_members_sorted( + incoming: &[AccountId], + outgoing: &[AccountId], + sorted_new: &[AccountId], + ); + + /// Set the new members; they **must already be sorted**. This will compute the diff and use it to + /// call `change_members_sorted`. + /// + /// This resets any previous value of prime. + fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { + let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members); + Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); + } + + /// Compute diff between new and old members; they **must already be sorted**. + /// + /// Returns incoming and outgoing members. + fn compute_members_diff_sorted( + new_members: &[AccountId], + old_members: &[AccountId], + ) -> (Vec, Vec) { + let mut old_iter = old_members.iter(); + let mut new_iter = new_members.iter(); + let mut incoming = Vec::new(); + let mut outgoing = Vec::new(); + let mut old_i = old_iter.next(); + let mut new_i = new_iter.next(); + loop { + match (old_i, new_i) { + (None, None) => break, + (Some(old), Some(new)) if old == new => { + old_i = old_iter.next(); + new_i = new_iter.next(); + } + (Some(old), Some(new)) if old < new => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (Some(old), None) => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (_, Some(new)) => { + incoming.push(new.clone()); + new_i = new_iter.next(); + } + } + } + (incoming, outgoing) + } + + /// Set the prime member. + fn set_prime(_prime: Option) {} + + /// Get the current prime. 
+ fn get_prime() -> Option { + None + } +} + +impl ChangeMembers for () { + fn change_members(_: &[T], _: &[T], _: Vec) {} + fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} + fn set_members_sorted(_: &[T], _: &[T]) {} + fn set_prime(_: Option) {} +} diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs new file mode 100644 index 0000000000000..ff4507dce9c98 --- /dev/null +++ b/frame/support/src/traits/metadata.rs @@ -0,0 +1,168 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for managing information attached to pallets and their constituents. + +use codec::{Encode, Decode}; +use sp_runtime::RuntimeDebug; + +/// Provides information about the pallet setup in the runtime. +/// +/// An implementor should be able to provide information about each pallet that +/// is configured in `construct_runtime!`. +pub trait PalletInfo { + /// Convert the given pallet `P` into its index as configured in the runtime. + fn index() -> Option; + /// Convert the given pallet `P` into its name as configured in the runtime. + fn name() -> Option<&'static str>; +} + +/// The function and pallet name of the Call. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] +pub struct CallMetadata { + /// Name of the function. 
+ pub function_name: &'static str, + /// Name of the pallet to which the function belongs. + pub pallet_name: &'static str, +} + +/// Gets the function name of the Call. +pub trait GetCallName { + /// Return all function names. + fn get_call_names() -> &'static [&'static str]; + /// Return the function name of the Call. + fn get_call_name(&self) -> &'static str; +} + +/// Gets the metadata for the Call - function name and pallet name. +pub trait GetCallMetadata { + /// Return all module names. + fn get_module_names() -> &'static [&'static str]; + /// Return all function names for the given `module`. + fn get_call_names(module: &str) -> &'static [&'static str]; + /// Return a [`CallMetadata`], containing function and pallet name of the Call. + fn get_call_metadata(&self) -> CallMetadata; +} + +/// The storage key postfix that is used to store the [`PalletVersion`] per pallet. +/// +/// The full storage key is built by using: +/// Twox128([`PalletInfo::name`]) ++ Twox128([`PALLET_VERSION_STORAGE_KEY_POSTFIX`]) +pub const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; + +/// The version of a pallet. +/// +/// Each pallet version is stored in the state under a fixed key. See +/// [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. +#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy)] +pub struct PalletVersion { + /// The major version of the pallet. + pub major: u16, + /// The minor version of the pallet. + pub minor: u8, + /// The patch version of the pallet. + pub patch: u8, +} + +impl PalletVersion { + /// Creates a new instance of `Self`. + pub fn new(major: u16, minor: u8, patch: u8) -> Self { + Self { + major, + minor, + patch, + } + } + + /// Returns the storage key for a pallet version. + /// + /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. + /// + /// Returns `None` if the given `PI` returned a `None` as name for the given + /// `Pallet`. 
+ pub fn storage_key() -> Option<[u8; 32]> { + let pallet_name = PI::name::()?; + + let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); + let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&pallet_name); + final_key[16..].copy_from_slice(&postfix); + + Some(final_key) + } + + /// Put this pallet version into the storage. + /// + /// It will use the storage key that is associated with the given `Pallet`. + /// + /// # Panics + /// + /// This function will panic iff `Pallet` can not be found by `PalletInfo`. + /// In a runtime that is put together using + /// [`construct_runtime!`](crate::construct_runtime) this should never happen. + /// + /// It will also panic if this function isn't executed in an externalities + /// provided environment. + pub fn put_into_storage(&self) { + let key = Self::storage_key::() + .expect("Every active pallet has a name in the runtime; qed"); + + crate::storage::unhashed::put(&key, self); + } +} + +impl sp_std::cmp::PartialOrd for PalletVersion { + fn partial_cmp(&self, other: &Self) -> Option { + let res = self.major + .cmp(&other.major) + .then_with(|| + self.minor + .cmp(&other.minor) + .then_with(|| self.patch.cmp(&other.patch) + )); + + Some(res) + } +} + +/// Provides version information about a pallet. +/// +/// This trait provides two functions for returning the version of a +/// pallet. There is a state where both functions can return distinct versions. +/// See [`GetPalletVersion::storage_version`] for more information about this. +pub trait GetPalletVersion { + /// Returns the current version of the pallet. + fn current_version() -> PalletVersion; + + /// Returns the version of the pallet that is stored in storage. + /// + /// Most of the time this will return the exact same version as + /// [`GetPalletVersion::current_version`]. 
Only when being in + /// a state after a runtime upgrade happened and the pallet has + /// not yet updated its version in storage, this will return a + /// different (the previous, seen from the time of calling) version. + /// + /// See [`PalletVersion`] for more information. + /// + /// # Note + /// + /// If there was no previous version of the pallet stored in the state, + /// this function returns `None`. + fn storage_version() -> Option; +} diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs new file mode 100644 index 0000000000000..2f219942907d8 --- /dev/null +++ b/frame/support/src/traits/misc.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Smaller traits used in FRAME which don't need their own file. + +use sp_runtime::traits::{StoredMapError, Block as BlockT}; +use sp_arithmetic::traits::AtLeast32Bit; +use crate::dispatch::Parameter; + +/// Anything that can have a `::len()` method. +pub trait Len { + /// Return the length of data type. + fn len(&self) -> usize; +} + +impl Len for T where ::IntoIter: ExactSizeIterator { + fn len(&self) -> usize { + self.clone().into_iter().len() + } +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value.
+ fn get() -> T; +} + +impl Get for () { + fn get() -> T { T::default() } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +/// A type for which some values make sense to be able to drop without further consideration. +pub trait TryDrop: Sized { + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self>; +} + +/// Return type used when we need to return one of two items, each of the opposite direction or +/// sign, with one (`Same`) being of the same type as the `self` or primary argument of the function +/// that returned it. +pub enum SameOrOther { + /// No item. + None, + /// An item of the same type as the `Self` on which the return function was called. + Same(A), + /// An item of the opposite type to the `Self` on which the return function was called. + Other(B), +} + +impl TryDrop for SameOrOther { + fn try_drop(self) -> Result<(), Self> { + if let SameOrOther::None = self { + Ok(()) + } else { + Err(self) + } + } +} + +impl SameOrOther { + /// Returns `Ok` with the inner value of `Same` if `self` is that, otherwise returns `Err` with + /// `self`. + pub fn try_same(self) -> Result { + match self { + SameOrOther::Same(a) => Ok(a), + x => Err(x), + } + } + + /// Returns `Ok` with the inner value of `Other` if `self` is that, otherwise returns `Err` with + /// `self`. + pub fn try_other(self) -> Result { + match self { + SameOrOther::Other(b) => Ok(b), + x => Err(x), + } + } + + /// Returns `Ok` if `self` is `None`, otherwise returns `Err` with `self`. 
+ pub fn try_none(self) -> Result<(), Self> { + match self { + SameOrOther::None => Ok(()), + x => Err(x), + } + } + + pub fn same(self) -> Result where A: Default { + match self { + SameOrOther::Same(a) => Ok(a), + SameOrOther::None => Ok(A::default()), + SameOrOther::Other(b) => Err(b), + } + } + + pub fn other(self) -> Result where B: Default { + match self { + SameOrOther::Same(a) => Err(a), + SameOrOther::None => Ok(B::default()), + SameOrOther::Other(b) => Ok(b), + } + } +} + +/// Handler for when a new account has been created. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OnNewAccount { + /// A new account `who` has been registered. + fn on_new_account(who: &AccountId); +} + +/// The account with the given id was reaped. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OnKilledAccount { + /// The account with the given id was reaped. + fn on_killed_account(who: &AccountId); +} + +/// A simple, generic one-parameter event notifier/handler. +pub trait HandleLifetime { + /// An account was created. + fn created(_t: &T) -> Result<(), StoredMapError> { Ok(()) } + + /// An account was killed. + fn killed(_t: &T) -> Result<(), StoredMapError> { Ok(()) } +} + +impl HandleLifetime for () {} + +pub trait Time { + type Moment: AtLeast32Bit + Parameter + Default + Copy; + + fn now() -> Self::Moment; +} + +/// Trait to deal with unix time. +pub trait UnixTime { + /// Return duration since `SystemTime::UNIX_EPOCH`. + fn now() -> core::time::Duration; +} + +/// Trait to be used when types are exactly same. +/// +/// This allow to convert back and forth from type, a reference and a mutable reference. +pub trait IsType: Into + From { + /// Cast reference. + fn from_ref(t: &T) -> &Self; + + /// Cast reference. + fn into_ref(&self) -> &T; + + /// Cast mutable reference. + fn from_mut(t: &mut T) -> &mut Self; + + /// Cast mutable reference. 
+ fn into_mut(&mut self) -> &mut T; +} + +impl IsType for T { + fn from_ref(t: &T) -> &Self { t } + fn into_ref(&self) -> &T { self } + fn from_mut(t: &mut T) -> &mut Self { t } + fn into_mut(&mut self) -> &mut T { self } +} + +/// Something that can be checked to be a of sub type `T`. +/// +/// This is useful for enums where each variant encapsulates a different sub type, and +/// you need access to these sub types. +/// +/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this +/// to check if a certain call is an instance of the local pallet's `Call` enum. +/// +/// # Example +/// +/// ``` +/// # use frame_support::traits::IsSubType; +/// +/// enum Test { +/// String(String), +/// U32(u32), +/// } +/// +/// impl IsSubType for Test { +/// fn is_sub_type(&self) -> Option<&String> { +/// match self { +/// Self::String(ref r) => Some(r), +/// _ => None, +/// } +/// } +/// } +/// +/// impl IsSubType for Test { +/// fn is_sub_type(&self) -> Option<&u32> { +/// match self { +/// Self::U32(ref r) => Some(r), +/// _ => None, +/// } +/// } +/// } +/// +/// fn main() { +/// let data = Test::String("test".into()); +/// +/// assert_eq!("test", IsSubType::::is_sub_type(&data).unwrap().as_str()); +/// } +/// ``` +pub trait IsSubType { + /// Returns `Some(_)` if `self` is an instance of sub type `T`. + fn is_sub_type(&self) -> Option<&T>; +} + +/// Something that can execute a given block. +/// +/// Executing a block means that all extrinsics in a given block will be executed and the resulting +/// header will be checked against the header of the given block. +pub trait ExecuteBlock { + /// Execute the given `block`. + /// + /// This will execute all extrinsics in the block and check that the resulting header is correct. + /// + /// # Panic + /// + /// Panics when an extrinsics panics or the resulting header doesn't match the expected header. + fn execute_block(block: Block); +} + +/// Off-chain computation trait. 
+/// +/// Implementing this trait on a module allows you to perform long-running tasks +/// that make (by default) validators generate transactions that feed results +/// of those long-running computations back on chain. +/// +/// NOTE: This function runs off-chain, so it can access the block state, +/// but cannot perform any alterations. More specifically alterations are +/// not forbidden, but they are not persisted in any way after the worker +/// has finished. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OffchainWorker { + /// This function is being called after every block import (when fully synced). + /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} +} + diff --git a/frame/support/src/traits/randomness.rs b/frame/support/src/traits/randomness.rs new file mode 100644 index 0000000000000..865893f99b393 --- /dev/null +++ b/frame/support/src/traits/randomness.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with on-chain randomness. + +/// A trait that is able to provide randomness.
+/// +/// Being a deterministic blockchain, real randomness is difficult to come by, different +/// implementations of this trait will provide different security guarantees. At best, +/// this will be randomness which was hard to predict a long time ago, but that has become +/// easy to predict recently. +pub trait Randomness { + /// Get the most recently determined random seed, along with the time in the past + /// since when it was determinable by chain observers. + /// + /// `subject` is a context identifier and allows you to get a different result to + /// other callers of this function; use it like `random(&b"my context"[..])`. + /// + /// NOTE: The returned seed should only be used to distinguish commitments made before + /// the returned block number. If the block number is too early (i.e. commitments were + /// made afterwards), then ensure no further commitments may be made and repeatedly + /// call this on later blocks until the block number returned is later than the latest + /// commitment. + fn random(subject: &[u8]) -> (Output, BlockNumber); + + /// Get the basic random seed. + /// + /// In general you won't want to use this, but rather `Self::random` which allows + /// you to give a subject for the random result and whose value will be + /// independently low-influence random from any other such seeds. + /// + /// NOTE: The returned seed should only be used to distinguish commitments made before + /// the returned block number. If the block number is too early (i.e. commitments were + /// made afterwards), then ensure no further commitments may be made and repeatedly + /// call this on later blocks until the block number returned is later than the latest + /// commitment. 
+ fn random_seed() -> (Output, BlockNumber) { + Self::random(&[][..]) + } +} diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs new file mode 100644 index 0000000000000..58e4c419f2813 --- /dev/null +++ b/frame/support/src/traits/schedule.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated utilities for scheduling dispatchables in FRAME. + +use sp_std::{prelude::*, fmt::Debug}; +use codec::{Encode, Decode, Codec, EncodeLike}; +use sp_runtime::{RuntimeDebug, DispatchError}; + +/// Information relating to the period of a scheduled task. First item is the length of the +/// period and the second is the number of times it should be executed in total before the task +/// is considered finished and removed. +pub type Period = (BlockNumber, u32); + +/// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning +/// higher priority. +pub type Priority = u8; + +/// The dispatch time of a scheduled task. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum DispatchTime { + /// At specified block. + At(BlockNumber), + /// After specified number of blocks. + After(BlockNumber), +} + +/// The highest priority. 
We invert the value so that normal sorting will place the highest +/// priority at the beginning of the list. +pub const HIGHEST_PRIORITY: Priority = 0; +/// Anything of this value or lower will definitely be scheduled on the block that they ask for, even +/// if it breaches the `MaximumWeight` limitation. +pub const HARD_DEADLINE: Priority = 63; +/// The lowest priority. Most stuff should be around here. +pub const LOWEST_PRIORITY: Priority = 255; + +/// A type that can be used as a scheduler. +pub trait Anon { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + Debug; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// This is not named. + fn schedule( + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Call + ) -> Result; + + /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, + /// also. + /// + /// Will return an error if the `address` is invalid. + /// + /// NOTE: This is guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + /// + /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For + /// that, you must name the task explicitly using the `Named` trait. + fn cancel(address: Self::Address) -> Result<(), ()>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. For periodic tasks, + /// this dispatch is guaranteed to succeed only before the *initial* execution; for + /// others, use `reschedule_named`. + /// + /// Will return an error if the `address` is invalid. + fn reschedule( + address: Self::Address, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task.
+ /// + /// Will return an error if the `address` is invalid. + fn next_dispatch_time(address: Self::Address) -> Result; +} + +/// A type that can be used as a scheduler. +pub trait Named { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// - `id`: The identity of the task. This must be unique and will return an error if not. + fn schedule_named( + id: Vec, + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Call + ) -> Result; + + /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances + /// of that, also. + /// + /// Will return an error if the `id` is invalid. + /// + /// NOTE: This is guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + fn cancel_named(id: Vec) -> Result<(), ()>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. + fn reschedule_named( + id: Vec, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task. + /// + /// Will return an error if the `id` is invalid. + fn next_dispatch_time(id: Vec) -> Result; +} diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs new file mode 100644 index 0000000000000..82e9c1e7a60f6 --- /dev/null +++ b/frame/support/src/traits/storage.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for encoding data related to pallet's storage items. + +/// An instance of a pallet in the storage. +/// +/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! +/// +/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances +/// "InstanceNMyModule". +pub trait Instance: 'static { + /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" + const PREFIX: &'static str; +} + +/// An instance of a storage in a pallet. +/// +/// Define an instance for an individual storage inside a pallet. +/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is +/// used to isolate storages inside a pallet. +/// +/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which +/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` +pub trait StorageInstance { + /// Prefix of a pallet to isolate it from other pallets. + fn pallet_prefix() -> &'static str; + + /// Prefix given to a storage to isolate from other storages in the pallet. + const STORAGE_PREFIX: &'static str; +} diff --git a/frame/support/src/traits/stored_map.rs b/frame/support/src/traits/stored_map.rs new file mode 100644 index 0000000000000..10964541ab32b --- /dev/null +++ b/frame/support/src/traits/stored_map.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated datatypes for managing abstract stored values. + +use codec::FullCodec; +use sp_runtime::traits::StoredMapError; +use crate::storage::StorageMap; +use crate::traits::misc::HandleLifetime; + +/// An abstraction of a value stored within storage, but possibly as part of a larger composite +/// item. +pub trait StoredMap { + /// Get the item, or its default if it doesn't yet exist; we make no distinction between the + /// two. + fn get(k: &K) -> T; + + /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is + /// returned. It is removed or reset to default value if it has been mutated to `None` + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result; + + // Everything past here has a default implementation. + + /// Mutate the item. + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + Self::mutate_exists(k, |maybe_account| match maybe_account { + Some(ref mut account) => f(account), + x @ None => { + let mut account = Default::default(); + let r = f(&mut account); + *x = Some(account); + r + } + }) + } + + /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. + /// + /// This is infallible as long as the value does not get destroyed. 
+ fn mutate_exists( + k: &K, + f: impl FnOnce(&mut Option) -> R, + ) -> Result { + Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) + } + + /// Set the item to something new. + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { Self::mutate(k, |i| *i = t) } + + /// Remove the item or otherwise replace it with its default value; we don't care which. + fn remove(k: &K) -> Result<(), StoredMapError> { Self::mutate_exists(k, |x| *x = None) } +} + +/// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this +/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this +/// would break the ability to have custom impls of `StoredValue`. The other workaround is to +/// implement it directly in the macro. +/// +/// This form has the advantage that two additional types are provides, `Created` and `Removed`, +/// which are both generic events that can be tied to handlers to do something in the case of being +/// about to create an account where one didn't previously exist (at all; not just where it used to +/// be the default value), or where the account is being removed or reset back to the default value +/// where previously it did exist (though may have been in a default state). This works well with +/// system module's `CallOnCreatedAccount` and `CallKillAccount`. 
+pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); +impl< + S: StorageMap, + L: HandleLifetime, + K: FullCodec, + T: FullCodec + Default, +> StoredMap for StorageMapShim { + fn get(k: &K) -> T { S::get(k) } + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { + if !S::contains_key(&k) { + L::created(k)?; + } + S::insert(k, t); + Ok(()) + } + fn remove(k: &K) -> Result<(), StoredMapError> { + if S::contains_key(&k) { + L::killed(&k)?; + S::remove(k); + } + Ok(()) + } + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + if !S::contains_key(&k) { + L::created(k)?; + } + Ok(S::mutate(k, f)) + } + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { + S::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let r = f(maybe_value); + let exists = maybe_value.is_some(); + + if !existed && exists { + L::created(k)?; + } else if existed && !exists { + L::killed(k)?; + } + Ok(r) + }) + } + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + S::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let r = f(maybe_value)?; + let exists = maybe_value.is_some(); + + if !existed && exists { + L::created(k).map_err(E::from)?; + } else if existed && !exists { + L::killed(k).map_err(E::from)?; + } + Ok(r) + }) + } +} diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs new file mode 100644 index 0000000000000..82af5dbade8f7 --- /dev/null +++ b/frame/support/src/traits/tokens.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for working with tokens and their associated datastructures. + +pub mod fungible; +pub mod fungibles; +pub mod currency; +pub mod imbalance; +mod misc; +pub use misc::{ + WithdrawConsequence, DepositConsequence, ExistenceRequirement, BalanceStatus, WithdrawReasons, +}; +pub use imbalance::Imbalance; diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs new file mode 100644 index 0000000000000..567ca44aa78c7 --- /dev/null +++ b/frame/support/src/traits/tokens/currency.rs @@ -0,0 +1,208 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Currency trait and associated types. 
+ +use sp_std::fmt::Debug; +use sp_runtime::traits::MaybeSerializeDeserialize; +use crate::dispatch::{DispatchResult, DispatchError}; +use super::misc::{Balance, WithdrawReasons, ExistenceRequirement}; +use super::imbalance::{Imbalance, SignedImbalance}; + + +mod reservable; +pub use reservable::ReservableCurrency; +mod lockable; +pub use lockable::{LockableCurrency, VestingSchedule, LockIdentifier}; + +/// Abstraction over a fungible assets system. +pub trait Currency { + /// The balance of an account. + type Balance: Balance + MaybeSerializeDeserialize + Debug; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type PositiveImbalance: Imbalance; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type NegativeImbalance: Imbalance; + + // PUBLIC IMMUTABLES + + /// The combined balance of `who`. + fn total_balance(who: &AccountId) -> Self::Balance; + + /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no + /// balance changes in the meantime and only the reserved balance is not taken into account. + fn can_slash(who: &AccountId, value: Self::Balance) -> bool; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. This is equivalent to the `Balances` module's + /// `ExistentialDeposit`. + fn minimum_balance() -> Self::Balance; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. 
+ fn burn(amount: Self::Balance) -> Self::PositiveImbalance; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> Self::NegativeImbalance; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { + (Self::burn(amount.clone()), Self::issue(amount)) + } + + /// The 'free' balance of a given account. + /// + /// This is the only balance that matters in terms of most operations on tokens. It alone + /// is used to determine the balance when in the contract execution environment. When this + /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is + /// deleted: specifically `FreeBalance`. + /// + /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn free_balance(who: &AccountId) -> Self::Balance; + + /// Returns `Ok` iff the account is able to make a withdrawal of the given amount + /// for the given reason. Basically, it's just a dry-run of `withdraw`. + /// + /// `Err(...)` with the reason why not otherwise. + fn ensure_can_withdraw( + who: &AccountId, + _amount: Self::Balance, + reasons: WithdrawReasons, + new_balance: Self::Balance, + ) -> DispatchResult; + + // PUBLIC MUTABLES (DANGEROUS) + + /// Transfer some liquid free balance to another staker. + /// + /// This is a very high-level function. 
It will ensure all appropriate fees are paid + /// and no imbalance in the system remains. + fn transfer( + source: &AccountId, + dest: &AccountId, + value: Self::Balance, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult; + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash( + who: &AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance); + + /// Mints `value` to the free balance of `who`. + /// + /// If `who` doesn't exist, nothing is done and an Err returned. + fn deposit_into_existing( + who: &AccountId, + value: Self::Balance + ) -> Result; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_into_existing( + who: &AccountId, + value: Self::NegativeImbalance, + ) -> Result<(), Self::NegativeImbalance> { + let v = value.peek(); + match Self::deposit_into_existing(who, v) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. + /// + /// Infallible. + fn deposit_creating( + who: &AccountId, + value: Self::Balance, + ) -> Self::PositiveImbalance; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_creating( + who: &AccountId, + value: Self::NegativeImbalance, + ) { + let v = value.peek(); + drop(value.offset(Self::deposit_creating(who, v))); + } + + /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is + /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. 
+ /// + /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, + /// then it returns `Err`. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is `value`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> Result; + + /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. + fn settle( + who: &AccountId, + value: Self::PositiveImbalance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> Result<(), Self::PositiveImbalance> { + let v = value.peek(); + match Self::withdraw(who, v, reasons, liveness) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Ensure an account's free balance equals some value; this will create the account + /// if needed. + /// + /// Returns a signed imbalance and status to indicate if the account was successfully updated or update + /// has led to killing of the account. + fn make_free_balance_be( + who: &AccountId, + balance: Self::Balance, + ) -> SignedImbalance; +} diff --git a/frame/support/src/traits/tokens/currency/lockable.rs b/frame/support/src/traits/tokens/currency/lockable.rs new file mode 100644 index 0000000000000..ed3d1cf46362b --- /dev/null +++ b/frame/support/src/traits/tokens/currency/lockable.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The lockable currency trait and some associated types. + +use crate::dispatch::DispatchResult; +use crate::traits::misc::Get; +use super::Currency; +use super::super::misc::WithdrawReasons; + +/// An identifier for a lock. Used for disambiguating different locks so that +/// they can be individually replaced or removed. +pub type LockIdentifier = [u8; 8]; + +/// A currency whose accounts can have liquidity restrictions. +pub trait LockableCurrency: Currency { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The maximum number of locks a user should have on their account. + type MaxLocks: Get; + + /// Create a new balance lock on account `who`. + /// + /// If the new lock is valid (i.e. not already expired), it will push the struct to + /// the `Locks` vec in storage. Note that you can lock more funds than a user has. + /// + /// If the lock `id` already exists, this will update it. + fn set_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all + /// parameters or creates a new one if it does not exist. + /// + /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it + /// applies the most severe constraints of the two, while `set_lock` replaces the lock + /// with the new parameters. 
As in, `extend_lock` will set: + /// - maximum `amount` + /// - bitwise mask of all `reasons` + fn extend_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Remove an existing lock. + fn remove_lock( + id: LockIdentifier, + who: &AccountId, + ); +} + +/// A vesting schedule over a currency. This allows a particular currency to have vesting limits +/// applied to it. +pub trait VestingSchedule { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The currency that this schedule applies to. + type Currency: Currency; + + /// Get the amount that is currently being vested and cannot be transferred out of this account. + /// Returns `None` if the account has no vesting schedule. + fn vesting_balance(who: &AccountId) -> Option<>::Balance>; + + /// Adds a vesting schedule to a given account. + /// + /// If there already exists a vesting schedule for the given account, an `Err` is returned + /// and nothing is updated. + /// + /// Is a no-op if the amount to be vested is zero. + /// + /// NOTE: This doesn't alter the free balance of the account. + fn add_vesting_schedule( + who: &AccountId, + locked: >::Balance, + per_block: >::Balance, + starting_block: Self::Moment, + ) -> DispatchResult; + + /// Remove a vesting schedule for a given account. + /// + /// NOTE: This doesn't alter the free balance of the account. + fn remove_vesting_schedule(who: &AccountId); +} diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs new file mode 100644 index 0000000000000..14ea1d3a16fb6 --- /dev/null +++ b/frame/support/src/traits/tokens/currency/reservable.rs @@ -0,0 +1,83 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The reservable currency trait. + +use super::Currency; +use super::super::misc::BalanceStatus; +use crate::dispatch::{DispatchResult, DispatchError}; + +/// A currency where funds can be reserved from the user. +pub trait ReservableCurrency: Currency { + /// Same result as `reserve(who, value)` (but without the side-effects) assuming there + /// are no balance changes in the meantime. + fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; + + /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` + /// is less than `value`, then a non-zero second item will be returned. + fn slash_reserved( + who: &AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance); + + /// The amount of the balance of a given account that is externally reserved; this can still get + /// slashed, but gets slashed last of all. + /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + /// + /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' + /// is deleted: specifically, `ReservedBalance`. 
+ /// + /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn reserved_balance(who: &AccountId) -> Self::Balance; + + /// Moves `value` from balance to reserved balance. + /// + /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will + /// be returned to notify of this. This is different behavior than `unreserve`. + fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; + + /// Moves up to `value` from reserved balance to free balance. This function cannot fail. + /// + /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` + /// is less than `value`, then the remaining amount will be returned. + /// + /// # NOTES + /// + /// - This is different from `reserve`. + /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will + /// invoke `on_reserved_too_low` and could reap the account. + fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, + /// depending on the `status`. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved( + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> Result; +} diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs new file mode 100644 index 0000000000000..5472212aaa65e --- /dev/null +++ b/frame/support/src/traits/tokens/fungible.rs @@ -0,0 +1,310 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for dealing with a single fungible token class and any associated types. + +use super::*; +use sp_runtime::traits::Saturating; +use crate::traits::misc::Get; +use crate::dispatch::{DispatchResult, DispatchError}; +use super::misc::{DepositConsequence, WithdrawConsequence, Balance}; + +mod balanced; +mod imbalance; +pub use balanced::{Balanced, Unbalanced}; +pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; + +/// Trait for providing balance-inspection access to a fungible asset. +pub trait Inspect { + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance() -> Self::Balance; + + /// Get the balance of `who`. + fn balance(who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that `who` can withdraw/transfer successfully. + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the balance of `who` may be increased by `amount`. + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence; + + /// Returns `Failed` if the balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. 
+ fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence; +} + +/// Trait for providing an ERC-20 style fungible asset. +pub trait Mutate: Inspect { + /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't + /// possible then an `Err` is returned and nothing is changed. + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of + /// minimum_balance requirements, burning the tokens. If that isn't possible then an `Err` is + /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result; + + /// Attempt to reduce the balance of `who` by as much as possible up to `amount`, and possibly + /// slightly more due to minimum_balance requirements. If no decrease is possible then an `Err` + /// is returned and nothing is changed. If successful, the amount of tokens reduced is returned. + /// + /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure + /// that is doesn't fail. + fn slash(who: &AccountId, amount: Self::Balance) -> Result { + Self::burn_from(who, Self::reducible_balance(who, false).min(amount)) + } + + /// Transfer funds from one account into another. The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events. 
+ fn teleport( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(&source, amount).into_result()?; + Self::can_deposit(&dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(source, amount)?; + debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + match Self::mint_into(dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + } + } + } +} + +/// Trait for providing a fungible asset which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a fungible asset which can be reserved. +pub trait InspectHold: Inspect { + /// Amount of funds held in reserve by `who`. + fn balance_on_hold(who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of funds of `who` may be placed on hold. + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a fungible asset which can be reserved. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release up to `amount` held funds in an account. + /// + /// The actual amount released is returned with `Ok`. + /// + /// If `best_effort` is `true`, then the amount actually unreserved and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release(who: &AccountId, amount: Self::Balance, best_effort: bool) + -> Result; + + /// Transfer held funds into a destination account. 
+ /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_held: bool, + ) -> Result; +} + +/// Trait for slashing a fungible asset which can be reserved. +pub trait BalancedHold: Balanced + MutateHold { + /// Reduce the balance of some funds on hold in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds that are on hold up to `amount` will be deducted as possible. If this is less + /// than `amount`, then a non-zero second item will be returned. + fn slash_held(who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance); +} + +impl< + AccountId, + T: Balanced + MutateHold, +> BalancedHold for T { + fn slash_held(who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance) + { + let actual = match Self::release(who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::default(), amount), + }; + >::slash(who, actual) + } +} + +/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// a single item. 
+pub struct ItemOf< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +>( + sp_std::marker::PhantomData<(F, A, AccountId)> +); + +impl< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +> Inspect for ItemOf { + type Balance = >::Balance; + fn total_issuance() -> Self::Balance { + >::total_issuance(A::get()) + } + fn minimum_balance() -> Self::Balance { + >::minimum_balance(A::get()) + } + fn balance(who: &AccountId) -> Self::Balance { + >::balance(A::get(), who) + } + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance { + >::reducible_balance(A::get(), who, keep_alive) + } + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence { + >::can_deposit(A::get(), who, amount) + } + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence { + >::can_withdraw(A::get(), who, amount) + } +} + +impl< + F: fungibles::Mutate, + A: Get<>::AssetId>, + AccountId, +> Mutate for ItemOf { + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::mint_into(A::get(), who, amount) + } + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result { + >::burn_from(A::get(), who, amount) + } +} + +impl< + F: fungibles::Transfer, + A: Get<>::AssetId>, + AccountId, +> Transfer for ItemOf { + fn transfer(source: &AccountId, dest: &AccountId, amount: Self::Balance, keep_alive: bool) + -> Result + { + >::transfer(A::get(), source, dest, amount, keep_alive) + } +} + +impl< + F: fungibles::InspectHold, + A: Get<>::AssetId>, + AccountId, +> InspectHold for ItemOf { + fn balance_on_hold(who: &AccountId) -> Self::Balance { + >::balance_on_hold(A::get(), who) + } + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool { + >::can_hold(A::get(), who, amount) + } +} + +impl< + F: fungibles::MutateHold, + A: Get<>::AssetId>, + AccountId, +> MutateHold for ItemOf { + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::hold(A::get(), who, amount) + } + fn release(who: 
&AccountId, amount: Self::Balance, best_effort: bool) + -> Result + { + >::release(A::get(), who, amount, best_effort) + } + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + >::transfer_held( + A::get(), + source, + dest, + amount, + best_effort, + on_hold, + ) + } +} + +impl< + F: fungibles::Unbalanced, + A: Get<>::AssetId>, + AccountId, +> Unbalanced for ItemOf { + fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::set_balance(A::get(), who, amount) + } + fn set_total_issuance(amount: Self::Balance) -> () { + >::set_total_issuance(A::get(), amount) + } + fn decrease_balance(who: &AccountId, amount: Self::Balance) -> Result { + >::decrease_balance(A::get(), who, amount) + } + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::decrease_balance_at_most(A::get(), who, amount) + } + fn increase_balance(who: &AccountId, amount: Self::Balance) -> Result { + >::increase_balance(A::get(), who, amount) + } + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::increase_balance_at_most(A::get(), who, amount) + } +} diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs new file mode 100644 index 0000000000000..19bdb4f245ee8 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -0,0 +1,360 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::{TokenError, traits::{CheckedAdd, Zero}}; +use super::super::Imbalance as ImbalanceT; +use crate::traits::misc::{SameOrOther, TryDrop}; +use crate::dispatch::{DispatchResult, DispatchError}; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. 
+ /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (DebtOf, CreditOf) { + (Self::rescind(amount), Self::issue(amount)) + } + + /// Deducts up to `value` from the combined balance of `who`. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); + + /// Mints exactly `value` into the account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` returned. This could happen because it + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + who: &AccountId, + value: Self::Balance, + ) -> Result, DispatchError>; + + /// Removes `value` balance from `who` account if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. 
+ /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf, + ) -> Result<(), CreditOf> { + let v = credit.peek(); + let debt = match Self::deposit(who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + let result = credit.offset(debt).try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DebtOf> { + let amount = debt.peek(); + let credit = match Self::withdraw(who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + SameOrOther::None => Ok(CreditOf::::zero()), + SameOrOther::Same(dust) => Ok(dust), + SameOrOther::Other(rest) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + } + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Set the balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. 
+ fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance to `amount`. + fn set_total_issuance(amount: Self::Balance); + + /// Reduce the balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance(who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance() { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(who, new_balance)?; + Ok(amount) + } + + /// Reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be destroyed. 
+ if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the balance of `who` by `amount`. If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance(who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(who); + let new_balance = old_balance.checked_add(&amount).ok_or(TokenError::Overflow)?; + if new_balance < Self::minimum_balance() { + Err(TokenError::BelowMinimum)? + } + if old_balance != new_balance { + Self::set_balance(who, new_balance)?; + } + Ok(amount) + } + + /// Increase the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance() { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. 
+pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_sub(amount)) + } +} + +/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that funds in someone's account have been removed and not yet placed anywhere +/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply +/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts. +type Credit = Imbalance< + >::Balance, + DecreaseIssuance, + IncreaseIssuance, +>; + +/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that there are funds in someone's account whose origin is as yet unaccounted +/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply +/// will be accordingly increased to ensure it equals the sum of the balances of all accounts. +type Debt = Imbalance< + >::Balance, + IncreaseIssuance, + DecreaseIssuance, +>; + +/// Create some `Credit` item. Only for internal use. +fn credit>( + amount: U::Balance, +) -> Credit { + Imbalance::new(amount) +} + +/// Create some `Debt` item. Only for internal use. 
+fn debt>( + amount: U::Balance, +) -> Debt { + Imbalance::new(amount) +} + +impl> Balanced for U { + type OnDropCredit = DecreaseIssuance; + type OnDropDebt = IncreaseIssuance; + fn rescind(amount: Self::Balance) -> Debt { + let old = U::total_issuance(); + let new = old.saturating_sub(amount); + U::set_total_issuance(new); + debt(old - new) + } + fn issue(amount: Self::Balance) -> Credit { + let old = U::total_issuance(); + let new = old.saturating_add(amount); + U::set_total_issuance(new); + credit(new - old) + } + fn slash( + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let slashed = U::decrease_balance_at_most(who, amount); + // `slashed` could be less than, greater than or equal to `amount`. + // If slashed == amount, it means the account had at least amount in it and it could all be + // removed without a problem. + // If slashed > amount, it means the account had more than amount in it, but not enough more + // to push it over minimum_balance. + // If slashed < amount, it means the account didn't have enough in it to be reduced by + // `amount` without being destroyed. + (credit(slashed), amount.saturating_sub(slashed)) + } + fn deposit( + who: &AccountId, + amount: Self::Balance + ) -> Result, DispatchError> { + let increase = U::increase_balance(who, amount)?; + Ok(debt(increase)) + } + fn withdraw( + who: &AccountId, + amount: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError> { + let decrease = U::decrease_balance(who, amount)?; + Ok(credit(decrease)) + } +} diff --git a/frame/support/src/traits/tokens/fungible/imbalance.rs b/frame/support/src/traits/tokens/fungible/imbalance.rs new file mode 100644 index 0000000000000..c084fa97fbec0 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance type and its associates, which handles keeps everything adding up properly with +//! unbalanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use super::misc::Balance; +use super::balanced::Balanced; +use crate::traits::misc::{TryDrop, SameOrOther}; +use super::super::Imbalance as ImbalanceT; + +/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or +/// debt (positive) imbalance. +pub trait HandleImbalanceDrop { + /// Some something with the imbalance's value which is being dropped. + fn handle(amount: Balance); +} + +/// An imbalance in the system, representing a divergence of recorded token supply from the sum of +/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing +/// into an account, settling from an account or altering the supply). +/// +/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module. 
+#[must_use] +pub struct Imbalance< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> { + amount: B, + _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop +> Drop for Imbalance { + fn drop(&mut self) { + if !self.amount.is_zero() { + OnDrop::handle(self.amount) + } + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> TryDrop for Imbalance { + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self> { + self.drop_zero() + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> Default for Imbalance { + fn default() -> Self { + Self::zero() + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> Imbalance { + pub(crate) fn new(amount: B) -> Self { + Self { amount, _phantom: PhantomData } + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> ImbalanceT for Imbalance { + type Opposite = Imbalance; + + fn zero() -> Self { + Self { amount: Zero::zero(), _phantom: PhantomData } + } + + fn drop_zero(self) -> Result<(), Self> { + if self.amount.is_zero() { + sp_std::mem::forget(self); + Ok(()) + } else { + Err(self) + } + } + + fn split(self, amount: B) -> (Self, Self) { + let first = self.amount.min(amount); + let second = self.amount - first; + sp_std::mem::forget(self); + (Imbalance::new(first), Imbalance::new(second)) + } + fn merge(mut self, other: Self) -> Self { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + self + } + fn subsume(&mut self, other: Self) { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + } + fn offset(self, other: Imbalance) + -> SameOrOther> + { + let (a, b) = (self.amount, other.amount); + 
sp_std::mem::forget((self, other)); + + if a == b { + SameOrOther::None + } else if a > b { + SameOrOther::Same(Imbalance::new(a - b)) + } else { + SameOrOther::Other(Imbalance::::new(b - a)) + } + } + fn peek(&self) -> B { + self.amount + } +} + +/// Imbalance implying that the total_issuance value is less than the sum of all account balances. +pub type DebtOf = Imbalance< + >::Balance, + // This will generally be implemented by increasing the total_issuance value. + >::OnDropDebt, + >::OnDropCredit, +>; + +/// Imbalance implying that the total_issuance value is greater than the sum of all account balances. +pub type CreditOf = Imbalance< + >::Balance, + // This will generally be implemented by decreasing the total_issuance value. + >::OnDropCredit, + >::OnDropDebt, +>; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs new file mode 100644 index 0000000000000..490f28dfb453a --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -0,0 +1,210 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for sets of fungible tokens and any associated types. 
+ +use super::*; +use crate::dispatch::{DispatchError, DispatchResult}; +use super::misc::{AssetId, Balance}; +use sp_runtime::traits::Saturating; + +mod balanced; +pub use balanced::{Balanced, Unbalanced}; +mod imbalance; +pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; + +/// Trait for providing balance-inspection access to a set of named fungible assets. +pub trait Inspect { + /// Means of identifying one asset class from another. + type AssetId: AssetId; + + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance(asset: Self::AssetId) -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance(asset: Self::AssetId) -> Self::Balance; + + /// Get the `asset` balance of `who`. + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the maximum amount of `asset` that `who` can withdraw/transfer successfully. + fn reducible_balance(asset: Self::AssetId, who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. + fn can_deposit(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> DepositConsequence; + + /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence; +} + +/// Trait for providing a set of named fungible assets which can be created and destroyed. +pub trait Mutate: Inspect { + /// Attempt to increase the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Minimum balance not met. + /// - Account cannot be created (e.g. because there is no provider reference and/or the asset + /// isn't considered worth anything). 
+ /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// increase the overall supply of the underlying token. + fn mint_into(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Attempt to reduce the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Less funds in the account than `amount` + /// - Liquidity requirements (locks, reservations) prevent the funds from being removed + /// - Operation would require destroying the account and it is required to stay alive (e.g. + /// because it's providing a needed provider reference). + /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// reduce the overall supply of the underlying token. + /// + /// Due to minimum balance requirements, it's possible that the amount withdrawn could be up to + /// `Self::minimum_balance() - 1` more than the `amount`. The total amount withdrawn is returned + /// in an `Ok` result. This may be safely ignored if you don't mind the overall supply reducing. + fn burn_from(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result; + + /// Attempt to reduce the `asset` balance of `who` by as much as possible up to `amount`, and + /// possibly slightly more due to minimum_balance requirements. If no decrease is possible then + /// an `Err` is returned and nothing is changed. If successful, the amount of tokens reduced is + /// returned. + /// + /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure + /// that is doesn't fail. + fn slash(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + Self::burn_from(asset, who, Self::reducible_balance(asset, who, false).min(amount)) + } + + /// Transfer funds from one account into another. 
The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events. + fn teleport( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(asset, &source, amount).into_result()?; + Self::can_deposit(asset, &dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(asset, source, amount)?; + debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + match Self::mint_into(asset, dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(asset, source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + } + } + } +} + +/// Trait for providing a set of named fungible assets which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a set of named fungible assets which can be placed on hold. +pub trait InspectHold: Inspect { + /// Amount of funds held in hold. + fn balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of `asset` may be held on the account of `who`. + fn can_hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a set of named fungible assets which can be placed on hold. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release some funds in an account from being on hold. 
+ /// + /// If `best_effort` is `true`, then the amount actually released and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release(asset: Self::AssetId, who: &AccountId, amount: Self::Balance, best_effort: bool) + -> Result; + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result; +} + +/// Trait for mutating one of several types of fungible assets which can be held. +pub trait BalancedHold: Balanced + MutateHold { + /// Release and slash some funds in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `amount` will be deducted as possible. If this is less than `amount`, + /// then a non-zero second item will be returned. 
+ fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance); +} + +impl< + AccountId, + T: Balanced + MutateHold, +> BalancedHold for T { + fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance) + { + let actual = match Self::release(asset, who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::zero(asset), amount), + }; + >::slash(asset, who, actual) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs new file mode 100644 index 0000000000000..efb21300bcaa8 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -0,0 +1,378 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::{TokenError, traits::{Zero, CheckedAdd}}; +use sp_arithmetic::traits::Saturating; +use crate::dispatch::{DispatchError, DispatchResult}; +use crate::traits::misc::{SameOrOther, TryDrop}; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. 
+/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(asset: Self::AssetId, amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(asset: Self::AssetId, amount: Self::Balance) + -> (DebtOf, CreditOf) + { + (Self::rescind(asset, amount), Self::issue(asset, amount)) + } + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. 
+ fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); + + /// Mints exactly `value` into the `asset` account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` returned. This could happen because it + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + ) -> Result, DispatchError>; + + /// Removes `value` free `asset` balance from `who` account if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf, + ) -> Result<(), CreditOf> { + let v = credit.peek(); + let debt = match Self::deposit(credit.asset(), who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + if let Ok(result) = credit.offset(debt) { + let result = result.try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + } else { + debug_assert!(false, "debt.asset is credit.asset; qed"); + } + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. 
If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DebtOf> { + let amount = debt.peek(); + let asset = debt.asset(); + let credit = match Self::withdraw(asset, who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + Ok(SameOrOther::None) => Ok(CreditOf::::zero(asset)), + Ok(SameOrOther::Same(dust)) => Ok(dust), + Ok(SameOrOther::Other(rest)) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + } + Err(_) => { + debug_assert!(false, "debt.asset is credit.asset; qed"); + Ok(CreditOf::::zero(asset)) + } + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Set the `asset` balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance of `asset` to `amount`. + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance); + + /// Reduce the `asset` balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance. 
+ /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance(asset) { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(asset, who, new_balance)?; + Ok(amount) + } + + /// Reduce the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(asset); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(asset, who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be destroyed. + if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(asset, who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the `asset` balance of `who` by `amount`. 
If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(asset, who); + let new_balance = old_balance.checked_add(&amount).ok_or(TokenError::Overflow)?; + if new_balance < Self::minimum_balance(asset) { + Err(TokenError::BelowMinimum)? + } + if old_balance != new_balance { + Self::set_balance(asset, who, new_balance)?; + } + Ok(amount) + } + + /// Increase the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(asset, who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance(asset) { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(asset, who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. 
+pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)) + } +} + +/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that funds in someone's account have been removed and not yet placed anywhere +/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply +/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts. +type Credit = Imbalance< + >::AssetId, + >::Balance, + DecreaseIssuance, + IncreaseIssuance, +>; + +/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that there are funds in someone's account whose origin is as yet unaccounted +/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply +/// will be accordingly increased to ensure it equals the sum of the balances of all accounts. +type Debt = Imbalance< + >::AssetId, + >::Balance, + IncreaseIssuance, + DecreaseIssuance, +>; + +/// Create some `Credit` item. Only for internal use. +fn credit>( + asset: U::AssetId, + amount: U::Balance, +) -> Credit { + Imbalance::new(asset, amount) +} + +/// Create some `Debt` item. Only for internal use. 
+fn debt>( + asset: U::AssetId, + amount: U::Balance, +) -> Debt { + Imbalance::new(asset, amount) +} + +impl> Balanced for U { + type OnDropCredit = DecreaseIssuance; + type OnDropDebt = IncreaseIssuance; + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> Debt { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)); + debt(asset, amount) + } + fn issue(asset: Self::AssetId, amount: Self::Balance) -> Credit { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)); + credit(asset, amount) + } + fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let slashed = U::decrease_balance_at_most(asset, who, amount); + // `slashed` could be less than, greater than or equal to `amount`. + // If slashed == amount, it means the account had at least amount in it and it could all be + // removed without a problem. + // If slashed > amount, it means the account had more than amount in it, but not enough more + // to push it over minimum_balance. + // If slashed < amount, it means the account didn't have enough in it to be reduced by + // `amount` without being destroyed. 
+ (credit(asset, slashed), amount.saturating_sub(slashed)) + } + fn deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance + ) -> Result, DispatchError> { + let increase = U::increase_balance(asset, who, amount)?; + Ok(debt(asset, increase)) + } + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError> { + let decrease = U::decrease_balance(asset, who, amount)?; + Ok(credit(asset, decrease)) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/imbalance.rs b/frame/support/src/traits/tokens/fungibles/imbalance.rs new file mode 100644 index 0000000000000..ecc415cb568bd --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -0,0 +1,169 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance type and its associates, which handles keeps everything adding up properly with +//! unbalanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use super::fungibles::{AssetId, Balance}; +use super::balanced::Balanced; +use crate::traits::misc::{TryDrop, SameOrOther}; + +/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or +/// debt (positive) imbalance. 
+pub trait HandleImbalanceDrop { + fn handle(asset: AssetId, amount: Balance); +} + +/// An imbalance in the system, representing a divergence of recorded token supply from the sum of +/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing +/// into an account, settling from an account or altering the supply). +/// +/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module. +#[must_use] +pub struct Imbalance< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> { + asset: A, + amount: B, + _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop +> Drop for Imbalance { + fn drop(&mut self) { + if !self.amount.is_zero() { + OnDrop::handle(self.asset, self.amount) + } + } +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> TryDrop for Imbalance { + /// Drop an instance cleanly. Only works if its value represents "no-operation". 
+ fn try_drop(self) -> Result<(), Self> { + self.drop_zero() + } +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> Imbalance { + pub fn zero(asset: A) -> Self { + Self { asset, amount: Zero::zero(), _phantom: PhantomData } + } + + pub(crate) fn new(asset: A, amount: B) -> Self { + Self { asset, amount, _phantom: PhantomData } + } + + pub fn drop_zero(self) -> Result<(), Self> { + if self.amount.is_zero() { + sp_std::mem::forget(self); + Ok(()) + } else { + Err(self) + } + } + + pub fn split(self, amount: B) -> (Self, Self) { + let first = self.amount.min(amount); + let second = self.amount - first; + let asset = self.asset; + sp_std::mem::forget(self); + (Imbalance::new(asset, first), Imbalance::new(asset, second)) + } + pub fn merge(mut self, other: Self) -> Result { + if self.asset == other.asset { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + Ok(self) + } else { + Err((self, other)) + } + } + pub fn subsume(&mut self, other: Self) -> Result<(), Self> { + if self.asset == other.asset { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + Ok(()) + } else { + Err(other) + } + } + pub fn offset(self, other: Imbalance) -> Result< + SameOrOther>, + (Self, Imbalance), + > { + if self.asset == other.asset { + let (a, b) = (self.amount, other.amount); + let asset = self.asset; + sp_std::mem::forget((self, other)); + + if a == b { + Ok(SameOrOther::None) + } else if a > b { + Ok(SameOrOther::Same(Imbalance::new(asset, a - b))) + } else { + Ok(SameOrOther::Other(Imbalance::::new(asset, b - a))) + } + } else { + Err((self, other)) + } + } + pub fn peek(&self) -> B { + self.amount + } + + pub fn asset(&self) -> A { + self.asset + } +} + +/// Imbalance implying that the total_issuance value is less than the sum of all account balances. 
+pub type DebtOf = Imbalance< + >::AssetId, + >::Balance, + // This will generally be implemented by increasing the total_issuance value. + >::OnDropDebt, + >::OnDropCredit, +>; + +/// Imbalance implying that the total_issuance value is greater than the sum of all account balances. +pub type CreditOf = Imbalance< + >::AssetId, + >::Balance, + // This will generally be implemented by decreasing the total_issuance value. + >::OnDropCredit, + >::OnDropDebt, +>; diff --git a/frame/support/src/traits/tokens/imbalance.rs b/frame/support/src/traits/tokens/imbalance.rs new file mode 100644 index 0000000000000..9652b9a0275a1 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance.rs @@ -0,0 +1,174 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance trait type and its associates, which handles keeps everything adding up properly +//! with unbalanced operations. + +use sp_std::ops::Div; +use sp_runtime::traits::Saturating; +use crate::traits::misc::{TryDrop, SameOrOther}; + +mod split_two_ways; +mod signed_imbalance; +mod on_unbalanced; +pub use split_two_ways::SplitTwoWays; +pub use signed_imbalance::SignedImbalance; +pub use on_unbalanced::OnUnbalanced; + +/// A trait for a not-quite Linear Type that tracks an imbalance. 
+/// +/// Functions that alter account balances return an object of this trait to +/// express how much account balances have been altered in aggregate. If +/// dropped, the currency system will take some default steps to deal with +/// the imbalance (`balances` module simply reduces or increases its +/// total issuance). Your module should generally handle it in some way, +/// good practice is to do so in a configurable manner using an +/// `OnUnbalanced` type for each situation in which your module needs to +/// handle an imbalance. +/// +/// Imbalances can either be Positive (funds were added somewhere without +/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted +/// somewhere without an equal and opposite addition - e.g. a slash or +/// system fee payment). +/// +/// Since they are unsigned, the actual type is always Positive or Negative. +/// The trait makes no distinction except to define the `Opposite` type. +/// +/// New instances of zero value can be created (`zero`) and destroyed +/// (`drop_zero`). +/// +/// Existing instances can be `split` and merged either consuming `self` with +/// `merge` or mutating `self` with `subsume`. If the target is an `Option`, +/// then `maybe_merge` and `maybe_subsume` might work better. Instances can +/// also be `offset` with an `Opposite` that is less than or equal to in value. +/// +/// You can always retrieve the raw balance value using `peek`. +#[must_use] +pub trait Imbalance: Sized + TryDrop + Default { + /// The oppositely imbalanced type. They come in pairs. + type Opposite: Imbalance; + + /// The zero imbalance. Can be destroyed with `drop_zero`. + fn zero() -> Self; + + /// Drop an instance cleanly. Only works if its `self.value()` is zero. + fn drop_zero(self) -> Result<(), Self>; + + /// Consume `self` and return two independent instances; the first + /// is guaranteed to be at most `amount` and the second will be the remainder. 
+ fn split(self, amount: Balance) -> (Self, Self); + + /// Consume `self` and return two independent instances; the amounts returned will be in + /// approximately the same ratio as `first`:`second`. + /// + /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should + /// fit into a `u32`. Overflow will safely saturate in both cases. + fn ration(self, first: u32, second: u32) -> (Self, Self) + where Balance: From + Saturating + Div + { + let total: u32 = first.saturating_add(second); + if total == 0 { return (Self::zero(), Self::zero()) } + let amount1 = self.peek().saturating_mul(first.into()) / total.into(); + self.split(amount1) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { + let (a, b) = self.split(amount); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) + where Balance: From + Saturating + Div + { + let (a, b) = self.ration(first, second); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise into two pre-existing Imbalance refs. + /// + /// A convenient replacement for `split` and `subsume`. + fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { + let (a, b) = self.split(amount); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. 
+ /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) + where Balance: From + Saturating + Div + { + let (a, b) = self.ration(first, second); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + fn merge(self, other: Self) -> Self; + + /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with + /// reversed arguments. + fn merge_into(self, other: &mut Self) { + other.subsume(self) + } + + /// Consume `self` and maybe an `other` to return a new instance that combines + /// both. + fn maybe_merge(self, other: Option) -> Self { + if let Some(o) = other { + self.merge(o) + } else { + self + } + } + + /// Consume an `other` to mutate `self` into a new instance that combines + /// both. + fn subsume(&mut self, other: Self); + + /// Maybe consume an `other` to mutate `self` into a new instance that combines + /// both. + fn maybe_subsume(&mut self, other: Option) { + if let Some(o) = other { + self.subsume(o) + } + } + + /// Consume self and along with an opposite counterpart to return + /// a combined result. + /// + /// Returns `Ok` along with a new instance of `Self` if this instance has a + /// greater value than the `other`. Otherwise returns `Err` with an instance of + /// the `Opposite`. In both cases the value represents the combination of `self` + /// and `other`. + fn offset(self, other: Self::Opposite)-> SameOrOther; + + /// The raw value of self. + fn peek(&self) -> Balance; +} diff --git a/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs new file mode 100644 index 0000000000000..f3ecc14308e74 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Trait for handling imbalances. + +use crate::traits::misc::TryDrop; + +/// Handler for when some currency "account" decreased in balance for +/// some reason. +/// +/// The only reason at present for an increase would be for validator rewards, but +/// there may be other reasons in the future or for other chains. +/// +/// Reasons for decreases include: +/// +/// - Someone got slashed. +/// - Someone paid for a transaction to be included. +pub trait OnUnbalanced { + /// Handler for some imbalances. The different imbalances might have different origins or + /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all + /// of them. Infallible. + fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { + Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) + } + + /// Handler for some imbalance. Infallible. + fn on_unbalanced(amount: Imbalance) { + amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) + } + + /// Actually handle a non-zero imbalance. You probably want to implement this rather than + /// `on_unbalanced`. 
+ fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } +} + +impl OnUnbalanced for () {} diff --git a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs new file mode 100644 index 0000000000000..e3523f86804fd --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convenience type for managing an imbalance whose sign is unknown. + +use codec::FullCodec; +use sp_std::fmt::Debug; +use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; +use crate::traits::misc::SameOrOther; +use super::super::imbalance::Imbalance; + +/// Either a positive or a negative imbalance. +pub enum SignedImbalance>{ + /// A positive imbalance (funds have been created but none destroyed). + Positive(PositiveImbalance), + /// A negative imbalance (funds have been destroyed but none created). + Negative(PositiveImbalance::Opposite), +} + +impl< + P: Imbalance, + N: Imbalance, + B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, +> SignedImbalance { + /// Create a `Positive` instance of `Self` whose value is zero. 
+ pub fn zero() -> Self { + SignedImbalance::Positive(P::zero()) + } + + /// Drop `Self` if and only if it is equal to zero. Return `Err` with `Self` if not. + pub fn drop_zero(self) -> Result<(), Self> { + match self { + SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), + SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), + } + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + pub fn merge(self, other: Self) -> Self { + match (self, other) { + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => + SignedImbalance::Positive(one.merge(other)), + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => + SignedImbalance::Negative(one.merge(other)), + (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => + match one.offset(other) { + SameOrOther::Same(positive) => SignedImbalance::Positive(positive), + SameOrOther::Other(negative) => SignedImbalance::Negative(negative), + SameOrOther::None => SignedImbalance::Positive(P::zero()), + }, + (one, other) => other.merge(one), + } + } +} diff --git a/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs new file mode 100644 index 0000000000000..f3f9870b62cd2 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Means for splitting an imbalance into two and hanlding them differently. + +use sp_std::{ops::Div, marker::PhantomData}; +use sp_core::u32_trait::Value as U32; +use sp_runtime::traits::Saturating; +use super::super::imbalance::{Imbalance, OnUnbalanced}; + +/// Split an unbalanced amount two ways between a common divisor. +pub struct SplitTwoWays< + Balance, + Imbalance, + Part1, + Target1, + Part2, + Target2, +>(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); + +impl< + Balance: From + Saturating + Div, + I: Imbalance, + Part1: U32, + Target1: OnUnbalanced, + Part2: U32, + Target2: OnUnbalanced, +> OnUnbalanced for SplitTwoWays +{ + fn on_nonzero_unbalanced(amount: I) { + let total: u32 = Part1::VALUE + Part2::VALUE; + let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); + let (imb1, imb2) = amount.split(amount1); + Target1::on_unbalanced(imb1); + Target2::on_unbalanced(imb2); + } +} diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs new file mode 100644 index 0000000000000..02f7ba384bd00 --- /dev/null +++ b/frame/support/src/traits/tokens/misc.rs @@ -0,0 +1,168 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Miscellaneous types. + +use codec::{Encode, Decode, FullCodec}; +use sp_core::RuntimeDebug; +use sp_arithmetic::traits::{Zero, AtLeast32BitUnsigned}; +use sp_runtime::TokenError; + +/// One of a number of consequences of withdrawing a fungible from an account. +#[derive(Copy, Clone, Eq, PartialEq)] +pub enum WithdrawConsequence { + /// Withdraw could not happen since the amount to be withdrawn is less than the total funds in + /// the account. + NoFunds, + /// The withdraw would mean the account dying when it needs to exist (usually because it is a + /// provider and there are consumer references on it). + WouldDie, + /// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist + /// on the system. + UnknownAsset, + /// There has been an underflow in the system. This is indicative of a corrupt state and + /// likely unrecoverable. + Underflow, + /// There has been an overflow in the system. This is indicative of a corrupt state and + /// likely unrecoverable. + Overflow, + /// Not enough of the funds in the account are unavailable for withdrawal. + Frozen, + /// Account balance would reduce to zero, potentially destroying it. The parameter is the + /// amount of balance which is destroyed. + ReducedToZero(Balance), + /// Account continued in existence. + Success, +} + +impl WithdrawConsequence { + /// Convert the type into a `Result` with `TokenError` as the error or the additional `Balance` + /// by which the account will be reduced. 
+ pub fn into_result(self) -> Result { + use WithdrawConsequence::*; + match self { + NoFunds => Err(TokenError::NoFunds), + WouldDie => Err(TokenError::WouldDie), + UnknownAsset => Err(TokenError::UnknownAsset), + Underflow => Err(TokenError::Underflow), + Overflow => Err(TokenError::Overflow), + Frozen => Err(TokenError::Frozen), + ReducedToZero(result) => Ok(result), + Success => Ok(Zero::zero()), + } + } +} + +/// One of a number of consequences of withdrawing a fungible from an account. +#[derive(Copy, Clone, Eq, PartialEq)] +pub enum DepositConsequence { + /// Deposit couldn't happen due to the amount being too low. This is usually because the + /// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed + /// for existance. + BelowMinimum, + /// Deposit cannot happen since the account cannot be created (usually because it's a consumer + /// and there exists no provider reference). + CannotCreate, + /// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist + /// on the system. + UnknownAsset, + /// An overflow would occur. This is practically unexpected, but could happen in test systems + /// with extremely small balance types or balances that approach the max value of the balance + /// type. + Overflow, + /// Account continued in existence. + Success, +} + +impl DepositConsequence { + /// Convert the type into a `Result` with `TokenError` as the error. + pub fn into_result(self) -> Result<(), TokenError> { + use DepositConsequence::*; + Err(match self { + BelowMinimum => TokenError::BelowMinimum, + CannotCreate => TokenError::CannotCreate, + UnknownAsset => TokenError::UnknownAsset, + Overflow => TokenError::Overflow, + Success => return Ok(()), + }) + } +} + +/// Simple boolean for whether an account needs to be kept in existence. +#[derive(Copy, Clone, Eq, PartialEq)] +pub enum ExistenceRequirement { + /// Operation must not result in the account going out of existence. 
+ /// + /// Note this implies that if the account never existed in the first place, then the operation + /// may legitimately leave the account unchanged and still non-existent. + KeepAlive, + /// Operation may result in account going out of existence. + AllowDeath, +} + +/// Status of funds. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +pub enum BalanceStatus { + /// Funds are free, as corresponding to `free` item in Balances. + Free, + /// Funds are reserved, as corresponding to `reserved` item in Balances. + Reserved, +} + +bitflags::bitflags! { + /// Reasons for moving funds out of an account. + #[derive(Encode, Decode)] + pub struct WithdrawReasons: i8 { + /// In order to pay for (system) transaction costs. + const TRANSACTION_PAYMENT = 0b00000001; + /// In order to transfer ownership. + const TRANSFER = 0b00000010; + /// In order to reserve some funds for a later return or repatriation. + const RESERVE = 0b00000100; + /// In order to pay some other (higher-level) fees. + const FEE = 0b00001000; + /// In order to tip a validator for transaction inclusion. + const TIP = 0b00010000; + } +} + +impl WithdrawReasons { + /// Choose all variants except for `one`. + /// + /// ```rust + /// # use frame_support::traits::WithdrawReasons; + /// # fn main() { + /// assert_eq!( + /// WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP, + /// WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), + /// ); + /// # } + /// ``` + pub fn except(one: WithdrawReasons) -> WithdrawReasons { + let mut flags = Self::all(); + flags.toggle(one); + flags + } +} + +/// Simple amalgamation trait to collect together properties for an AssetId under one roof. +pub trait AssetId: FullCodec + Copy + Default + Eq + PartialEq {} +impl AssetId for T {} + +/// Simple amalgamation trait to collect together properties for a Balance under one roof. 
+pub trait Balance: AtLeast32BitUnsigned + FullCodec + Copy + Default {} +impl Balance for T {} diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs new file mode 100644 index 0000000000000..900be7bb8e7e2 --- /dev/null +++ b/frame/support/src/traits/validation.rs @@ -0,0 +1,242 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with validation and validators. + +use sp_std::prelude::*; +use codec::{Codec, Decode}; +use sp_runtime::traits::{Convert, Zero}; +use sp_runtime::{BoundToRuntimeAppPublic, ConsensusEngineId, Percent, RuntimeAppPublic}; +use sp_staking::SessionIndex; +use crate::dispatch::Parameter; +use crate::weights::Weight; + +/// A trait for online node inspection in a session. +/// +/// Something that can give information about the current validator set. +pub trait ValidatorSet { + /// Type for representing validator id in a session. + type ValidatorId: Parameter; + /// A type for converting `AccountId` to `ValidatorId`. + type ValidatorIdOf: Convert>; + + /// Returns current session index. + fn session_index() -> SessionIndex; + + /// Returns the active set of validators. + fn validators() -> Vec; +} + +/// [`ValidatorSet`] combined with an identification. 
+pub trait ValidatorSetWithIdentification: ValidatorSet { + /// Full identification of `ValidatorId`. + type Identification: Parameter; + /// A type for converting `ValidatorId` to `Identification`. + type IdentificationOf: Convert>; +} + +/// A trait for finding the author of a block header based on the `PreRuntime` digests contained +/// within it. +pub trait FindAuthor { + /// Find the author of a block based on the pre-runtime digests. + fn find_author<'a, I>(digests: I) -> Option + where I: 'a + IntoIterator; +} + +impl FindAuthor for () { + fn find_author<'a, I>(_: I) -> Option + where I: 'a + IntoIterator + { + None + } +} + +/// A trait for verifying the seal of a header and returning the author. +pub trait VerifySeal { + /// Verify a header and return the author, if any. + fn verify_seal(header: &Header) -> Result, &'static str>; +} + +/// A session handler for specific key type. +pub trait OneSessionHandler: BoundToRuntimeAppPublic { + /// The key type expected. + type Key: Decode + Default + RuntimeAppPublic; + + /// The given validator set will be used for the genesis session. + /// It is guaranteed that the given validator set will also be used + /// for the second session, therefore the first call to `on_new_session` + /// should provide the same validator set. + fn on_genesis_session<'a, I: 'a>(validators: I) + where I: Iterator, ValidatorId: 'a; + + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true when at least one of the session keys + /// or the underlying economic identities/distribution behind one of the + /// session keys has changed, false otherwise. + /// + /// The `validators` are the validators of the incoming session, and `queued_validators` + /// will follow. + fn on_new_session<'a, I: 'a>( + changed: bool, + validators: I, + queued_validators: I, + ) where I: Iterator, ValidatorId: 'a; + + /// A notification for end of the session. 
+ /// + /// Note it is triggered before any `SessionManager::end_session` handlers, + /// so we can still affect the validator set. + fn on_before_session_ending() {} + + /// A validator got disabled. Act accordingly until a new session begins. + fn on_disabled(_validator_index: usize); +} + +/// Something that can estimate at which block the next session rotation will happen (i.e. a new +/// session starts). +/// +/// The accuracy of the estimates is dependent on the specific implementation, but in order to get +/// the best estimate possible these methods should be called throughout the duration of the session +/// (rather than calling once and storing the result). +/// +/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No +/// assumptions are made about the scheduling of the sessions. +pub trait EstimateNextSessionRotation { + /// Return the average length of a session. + /// + /// This may or may not be accurate. + fn average_session_length() -> BlockNumber; + + /// Return an estimate of the current session progress. + /// + /// None should be returned if the estimation fails to come to an answer. + fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); + + /// Return the block number at which the next session rotation is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer. + fn estimate_next_session_rotation(now: BlockNumber) -> (Option, Weight); +} + +impl EstimateNextSessionRotation for () { + fn average_session_length() -> BlockNumber { + Zero::zero() + } + + fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } + + fn estimate_next_session_rotation(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } +} + +/// Something that can estimate at which block scheduling of the next session will happen (i.e when +/// we will try to fetch new validators). 
+/// +/// This only refers to the point when we fetch the next session details and not when we enact them +/// (for enactment there's `EstimateNextSessionRotation`). With `pallet-session` this should be +/// triggered whenever `SessionManager::new_session` is called. +/// +/// For example, if we are using a staking module this would be the block when the session module +/// would ask staking what the next validator set will be, as such this must always be implemented +/// by the session module. +pub trait EstimateNextNewSession { + /// Return the average length of a session. + /// + /// This may or may not be accurate. + fn average_session_length() -> BlockNumber; + + /// Return the block number at which the next new session is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer. + fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight); +} + +impl EstimateNextNewSession for () { + fn average_session_length() -> BlockNumber { + Zero::zero() + } + + fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } +} + +/// Something which can compute and check proofs of +/// a historical key owner and return full identification data of that +/// key owner. +pub trait KeyOwnerProofSystem { + /// The proof of membership itself. + type Proof: Codec; + /// The full identification of a key owner and the stash account. + type IdentificationTuple: Codec; + + /// Prove membership of a key owner in the current block-state. + /// + /// This should typically only be called off-chain, since it may be + /// computationally heavy. + /// + /// Returns `Some` iff the key owner referred to by the given `key` is a + /// member of the current set. + fn prove(key: Key) -> Option; + + /// Check a proof of membership on-chain. Return `Some` iff the proof is + /// valid and recent enough to check. 
+ fn check_proof(key: Key, proof: Self::Proof) -> Option; +} + +impl KeyOwnerProofSystem for () { + // The proof and identification tuples is any bottom type to guarantee that the methods of this + // implementation can never be called or return anything other than `None`. + type Proof = crate::Void; + type IdentificationTuple = crate::Void; + + fn prove(_key: Key) -> Option { + None + } + + fn check_proof(_key: Key, _proof: Self::Proof) -> Option { + None + } +} + +/// Trait to be used by block producing consensus engine modules to determine +/// how late the current block is (e.g. in a slot-based proposal mechanism how +/// many slots were skipped since the previous block). +pub trait Lateness { + /// Returns a generic measure of how late the current block is compared to + /// its parent. + fn lateness(&self) -> N; +} + +impl Lateness for () { + fn lateness(&self) -> N { + Zero::zero() + } +} + +/// Implementors of this trait provide information about whether or not some validator has +/// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. +pub trait ValidatorRegistration { + /// Returns true if the provided validator ID has been registered with the implementing runtime + /// module + fn is_registered(id: &ValidatorId) -> bool; +} diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs new file mode 100644 index 0000000000000..b6913a182d30b --- /dev/null +++ b/frame/support/src/traits/voting.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated data structures concerned with voting, and moving between tokens and +//! votes. + +use sp_arithmetic::traits::{UniqueSaturatedInto, UniqueSaturatedFrom, SaturatedConversion}; + +/// A trait similar to `Convert` to convert values from `B`, an abstract balance type +/// into u64 and back from u128. (This conversion is used in election and other places where complex +/// calculation over balance type is needed) +/// +/// Total issuance of the currency is passed in, but an implementation of this trait may or may not +/// use it. +/// +/// # WARNING +/// +/// the total issuance being passed in implies that the implementation must be aware of the fact +/// that its values can affect the outcome. This implies that if the vote value is dependent on the +/// total issuance, it should never be written to storage for later re-use. +pub trait CurrencyToVote { + /// Convert balance to u64. + fn to_vote(value: B, issuance: B) -> u64; + + /// Convert u128 to balance. + fn to_currency(value: u128, issuance: B) -> B; +} + +/// An implementation of `CurrencyToVote` tailored for chains that have a balance type of u128. +/// +/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the +/// important cases: +/// +/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that +/// the factor will not have any effect. In this case, any account's balance is also less. Thus, +/// both of the conversions are basically an `as`; Any balance can fit in u64. 
+/// +/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and +/// divided upon conversion. +pub struct U128CurrencyToVote; + +impl U128CurrencyToVote { + fn factor(issuance: u128) -> u128 { + (issuance / u64::max_value() as u128).max(1) + } +} + +impl CurrencyToVote for U128CurrencyToVote { + fn to_vote(value: u128, issuance: u128) -> u64 { + (value / Self::factor(issuance)).saturated_into() + } + + fn to_currency(value: u128, issuance: u128) -> u128 { + value.saturating_mul(Self::factor(issuance)) + } +} + + +/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. +/// +/// # Warning +/// +/// This is designed to be used mostly for testing. Use with care, and think about the consequences. +pub struct SaturatingCurrencyToVote; + +impl + UniqueSaturatedFrom> CurrencyToVote for SaturatingCurrencyToVote { + fn to_vote(value: B, _: B) -> u64 { + value.unique_saturated_into() + } + + fn to_currency(value: u128, _: B) -> B { + B::unique_saturated_from(value) + } +} diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index abd54994bc9e8..840b1c3c01ac9 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -130,11 +130,8 @@ #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use codec::{Encode, Decode}; -use sp_runtime::{ - RuntimeDebug, - traits::SignedExtension, - generic::{CheckedExtrinsic, UncheckedExtrinsic}, -}; +use sp_runtime::{RuntimeDebug, traits::SignedExtension}; +use sp_runtime::generic::{CheckedExtrinsic, UncheckedExtrinsic}; use crate::dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, DispatchError}; use sp_runtime::traits::SaturatedConversion; use sp_arithmetic::{Perbill, traits::{BaseArithmetic, Saturating, Unsigned}}; @@ -964,13 +961,13 @@ mod tests { smallvec![ WeightToFeeCoefficient { coeff_integer: 0, - coeff_frac: Perbill::from_fraction(0.5), + coeff_frac: Perbill::from_float(0.5), negative: false, degree: 3 
}, WeightToFeeCoefficient { coeff_integer: 2, - coeff_frac: Perbill::from_rational_approximation(1u32, 3u32), + coeff_frac: Perbill::from_rational(1u32, 3u32), negative: false, degree: 2 }, diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 17aeea970c057..7d2f0ec463a38 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -42,3 +42,4 @@ std = [ "sp-state-machine", ] try-runtime = ["frame-support/try-runtime"] +conditional-storage = [] diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index ae3efdf57aa22..d40031c149d90 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -67,7 +67,7 @@ where ( Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default(), - frame_system::Module::::block_number(), + frame_system::Pallet::::block_number(), ) } } diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 8dc44c2024adc..a1ec744e42733 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -138,17 +138,17 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event, Origin} = 30, - Module1_1: module1::::{Module, Call, Storage, Event, Origin}, - Module2: module2::{Module, Call, Storage, Event, Origin}, - Module1_2: module1::::{Module, Call, Storage, Event, Origin}, - Module1_3: module1::::{Module, Storage} = 6, - Module1_4: module1::::{Module, Call} = 3, - Module1_5: module1::::{Module, Event}, - Module1_6: module1::::{Module, Call, Storage, Event, Origin} = 1, - Module1_7: module1::::{Module, Call, Storage, Event, Origin}, - Module1_8: module1::::{Module, Call, Storage, Event, Origin} = 12, - Module1_9: module1::::{Module, Call, Storage, Event, Origin}, + System: system::{Pallet, Call, Event, Origin} = 30, + Module1_1: module1::::{Pallet, Call, Storage, Event, 
Origin}, + Module2: module2::{Pallet, Call, Storage, Event, Origin}, + Module1_2: module1::::{Pallet, Call, Storage, Event, Origin}, + Module1_3: module1::::{Pallet, Storage} = 6, + Module1_4: module1::::{Pallet, Call} = 3, + Module1_5: module1::::{Pallet, Event}, + Module1_6: module1::::{Pallet, Call, Storage, Event, Origin} = 1, + Module1_7: module1::::{Pallet, Call, Storage, Event, Origin}, + Module1_8: module1::::{Pallet, Call, Storage, Event, Origin} = 12, + Module1_9: module1::::{Pallet, Call, Storage, Event, Origin}, } ); diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr index 65368666c88fe..2e2028fd1b862 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr @@ -1,10 +1,10 @@ -error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 +error: Pallet indices are conflicting: Both pallets System and Pallet1 are at index 0 --> $DIR/conflicting_index.rs:9:3 | 9 | System: system::{}, | ^^^^^^ -error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 +error: Pallet indices are conflicting: Both pallets System and Pallet1 are at index 0 --> $DIR/conflicting_index.rs:10:3 | 10 | Pallet1: pallet1::{} = 0, diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr index b792ff5d2a541..bfa3706a456a4 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr @@ -1,10 +1,10 @@ -error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 +error: Pallet indices are conflicting: Both pallets System and Pallet3 are at index 5 --> $DIR/conflicting_index_2.rs:9:3 | 9 | System: 
system::{} = 5, | ^^^^^^ -error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 +error: Pallet indices are conflicting: Both pallets System and Pallet3 are at index 5 --> $DIR/conflicting_index_2.rs:12:3 | 12 | Pallet3: pallet3::{}, diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs index bc242a57a41e5..7cc6cbd6bd6e2 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs @@ -6,9 +6,9 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, - Balance: balances::{Module}, - Balance: balances::{Module}, + System: system::{Pallet}, + Balance: balances::{Pallet}, + Balance: balances::{Pallet}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr index f5b999db66a41..27c5644e0d736 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr @@ -1,11 +1,11 @@ -error: Two modules with the same name! +error: Two pallets with the same name! --> $DIR/conflicting_module_name.rs:10:3 | -10 | Balance: balances::{Module}, +10 | Balance: balances::{Pallet}, | ^^^^^^^ -error: Two modules with the same name! +error: Two pallets with the same name! 
--> $DIR/conflicting_module_name.rs:11:3 | -11 | Balance: balances::{Module}, +11 | Balance: balances::{Pallet}, | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs index ec37456e58e79..836af597851d8 100644 --- a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::{Config, Call, Config, Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs index b79d73ff5c022..b3f0d340d671f 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Call, Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr index fe880549211bc..06caa036b91ff 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr @@ -1,4 +1,4 @@ -error: `Call` is not allowed to have generics. Only the following modules are allowed to have generics: `Event`, `Origin`, `Config`. +error: `Call` is not allowed to have generics. Only the following pallets are allowed to have generics: `Event`, `Origin`, `Config`. 
--> $DIR/generics_in_invalid_module.rs:10:36 | 10 | Balance: balances::::{Call, Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr index 66c9fc95cb546..29df6e4bd8cb5 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Module`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` --> $DIR/invalid_module_details_keyword.rs:9:20 | 9 | system: System::{enum}, diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs index 3754d41d6e81c..e7d32559a6cc6 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs @@ -6,7 +6,7 @@ construct_runtime! 
{ NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::{Error}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr index 7442c6be3a9a3..bd3e672dc8b40 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Module`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` --> $DIR/invalid_module_entry.rs:10:23 | 10 | Balance: balances::{Error}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs index 5eb7df5d18c20..f748e643aa18a 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs @@ -6,7 +6,7 @@ construct_runtime! 
{ NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Event}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr index f80b4bd66abdd..b1aa9b86cd0d6 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr @@ -1,4 +1,4 @@ -error: Instantiable module with no generic `Event` cannot be constructed: module `Balance` must have generic `Event` +error: Instantiable pallet with no generic `Event` cannot be constructed: pallet `Balance` must have generic `Event` --> $DIR/missing_event_generic_on_module_with_instance.rs:10:3 | 10 | Balance: balances::::{Event}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs index 5e44ae84d87c6..7053acc185900 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs @@ -6,7 +6,7 @@ construct_runtime! 
{ NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr index 0f7d36aafb863..63bb7442a8576 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr @@ -1,4 +1,4 @@ -error: Instantiable module with no generic `Origin` cannot be constructed: module `Balance` must have generic `Origin` +error: Instantiable pallet with no generic `Origin` cannot be constructed: pallet `Balance` must have generic `Origin` --> $DIR/missing_origin_generic_on_module_with_instance.rs:10:3 | 10 | Balance: balances::::{Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr index 2ebe0721eb381..7648f5c1bfb33 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr @@ -1,7 +1,6 @@ -error: `System` module declaration is missing. Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},` +error: `System` pallet declaration is missing. 
Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},` --> $DIR/missing_system_module.rs:8:2 | -8 | { - | _____^ +8 | / { 9 | | } | |_____^ diff --git a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr index c0ef5c8e60b9e..2e055f5d3726a 100644 --- a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr +++ b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr @@ -1,4 +1,4 @@ -error: Module index doesn't fit into u8, index is 256 +error: Pallet index doesn't fit into u8, index is 256 --> $DIR/more_than_256_modules.rs:10:3 | 10 | Pallet256: pallet256::{}, diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index bbd907adecb33..36384178d469b 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -4,4 +4,9 @@ error[E0277]: can't compare `Foo` with `Foo` 6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | + ::: $RUST/core/src/cmp.rs + | + | pub trait Eq: PartialEq { + | --------------- required by this bound in `Eq` + | = help: the trait `PartialEq` is not implemented for `Foo` diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 42cc2af19c655..e0dd1d1891d26 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -264,24 +264,24 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, + System: system::{Pallet, Call, Event}, Module1_1: module1::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module1_2: module1::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, 
Origin, Inherent }, - Module2: module2::{Module, Call, Storage, Event, Config, Origin, Inherent}, + Module2: module2::{Pallet, Call, Storage, Event, Config, Origin, Inherent}, Module2_1: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module2_2: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module2_3: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, - Module3: module3::{Module, Call}, + Module3: module3::{Pallet, Call}, } ); diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 9ad9b8be7f415..4525e8c1a1fe2 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -177,8 +177,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Module: module::{Module, Call, Storage, Config}, + System: system::{Pallet, Call, Event}, + Module: module::{Pallet, Call, Storage, Config}, } ); diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index a31ce9d91ae2d..5387312819c87 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -217,6 +217,28 @@ pub mod pallet { #[pallet::storage] pub type DoubleMap2 = StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; + #[pallet::storage] + #[pallet::getter(fn conditional_value)] + #[cfg(feature = "conditional-storage")] + pub type ConditionalValue = StorageValue<_, u32>; + + #[cfg(feature = "conditional-storage")] + #[pallet::storage] + #[pallet::getter(fn conditional_map)] + pub type ConditionalMap = StorageMap<_, Twox64Concat, u16, u32>; + + #[cfg(feature = "conditional-storage")] + #[pallet::storage] + #[pallet::getter(fn conditional_double_map)] 
+ pub type ConditionalDoubleMap = StorageDoubleMap< + _, + Blake2_128Concat, + u8, + Twox64Concat, + u16, + u32, + >; + #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -396,9 +418,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Example: pallet::{Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, - Example2: pallet2::{Module, Call, Event, Config, Storage}, + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + Example2: pallet2::{Pallet, Call, Event, Config, Storage}, } ); @@ -522,6 +544,13 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + + #[cfg(feature = "conditional-storage")] + { + pallet::ConditionalValue::::put(1); + pallet::ConditionalMap::::insert(1, 2); + pallet::ConditionalDoubleMap::::insert(1, 2, 3); + } }) } @@ -530,11 +559,11 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllModules::on_initialize(1), 10); - AllModules::on_finalize(1); + assert_eq!(AllPallets::on_initialize(1), 10); + AllPallets::on_finalize(1); assert_eq!(pallet::Pallet::::storage_version(), None); - assert_eq!(AllModules::on_runtime_upgrade(), 30); + assert_eq!(AllPallets::on_runtime_upgrade(), 30); assert_eq!( pallet::Pallet::::storage_version(), Some(pallet::Pallet::::current_version()), @@ -646,6 +675,38 @@ fn metadata() { default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + name: DecodeDifferent::Decoded("ConditionalValue".to_string()), + modifier: StorageEntryModifier::Optional, + ty: 
StorageEntryType::Plain(DecodeDifferent::Decoded("u32".to_string())), + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + name: DecodeDifferent::Decoded("ConditionalMap".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: DecodeDifferent::Decoded("u16".to_string()), + value: DecodeDifferent::Decoded("u32".to_string()), + hasher: StorageHasher::Twox64Concat, + unused: false, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + name: DecodeDifferent::Decoded("ConditionalDoubleMap".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + value: DecodeDifferent::Decoded("u32".to_string()), + key1: DecodeDifferent::Decoded("u8".to_string()), + key2: DecodeDifferent::Decoded("u16".to_string()), + hasher: StorageHasher::Blake2_128Concat, + key2_hasher: StorageHasher::Twox64Concat, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, ]), })), calls: Some(DecodeDifferent::Decoded(vec![ diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 5b9001e0475fe..95e1c027eb3fa 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -247,10 +247,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, + System: frame_system::{Pallet, Call, Event}, // NOTE: name Example here is needed in order to have same module prefix - Example: pallet::{Module, Call, Event, Config, Storage}, - PalletOld: pallet_old::{Module, Call, Event, Config, Storage}, + Example: pallet::{Pallet, Call, Event, Config, Storage}, + 
PalletOld: pallet_old::{Pallet, Call, Event, Config, Storage}, } ); diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index d7de03ea46cfd..603c583ae217f 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -259,13 +259,13 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Example: pallet::{Module, Call, Event, Config, Storage}, - PalletOld: pallet_old::{Module, Call, Event, Config, Storage}, - Instance2Example: pallet::::{Module, Call, Event, Config, Storage}, - PalletOld2: pallet_old::::{Module, Call, Event, Config, Storage}, - Instance3Example: pallet::::{Module, Call, Event, Config, Storage}, - PalletOld3: pallet_old::::{Module, Call, Event, Config, Storage}, + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage}, + PalletOld: pallet_old::{Pallet, Call, Event, Config, Storage}, + Instance2Example: pallet::::{Pallet, Call, Event, Config, Storage}, + PalletOld2: pallet_old::::{Pallet, Call, Event, Config, Storage}, + Instance3Example: pallet::::{Pallet, Call, Event, Config, Storage}, + PalletOld3: pallet_old::::{Pallet, Call, Event, Config, Storage}, } ); diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 62654d53e19d7..1bf4c1af09280 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -288,13 +288,13 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Example: pallet::{Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage, 
Inherent, Origin, ValidateUnsigned}, Instance1Example: pallet::::{ - Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned + Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned }, - Example2: pallet2::{Module, Call, Event, Config, Storage}, - Instance1Example2: pallet2::::{Module, Call, Event, Config, Storage}, + Example2: pallet2::{Pallet, Call, Event, Config, Storage}, + Instance1Example2: pallet2::::{Pallet, Call, Event, Config, Storage}, } ); @@ -377,19 +377,19 @@ fn instance_expand() { #[test] fn pallet_expand_deposit_event() { TestExternalities::default().execute_with(|| { - frame_system::Module::::set_block_number(1); + frame_system::Pallet::::set_block_number(1); pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( - frame_system::Module::::events()[0].event, + frame_system::Pallet::::events()[0].event, Event::pallet(pallet::Event::Something(3)), ); }); TestExternalities::default().execute_with(|| { - frame_system::Module::::set_block_number(1); + frame_system::Pallet::::set_block_number(1); pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( - frame_system::Module::::events()[0].event, + frame_system::Pallet::::events()[0].event, Event::pallet_Instance1(pallet::Event::Something(3)), ); }); @@ -480,14 +480,14 @@ fn storage_expand() { #[test] fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { - frame_system::Module::::set_block_number(1); + frame_system::Pallet::::set_block_number(1); - assert_eq!(AllModules::on_initialize(1), 21); - AllModules::on_finalize(1); + assert_eq!(AllPallets::on_initialize(1), 21); + AllPallets::on_finalize(1); assert_eq!(pallet::Pallet::::storage_version(), None); assert_eq!(pallet::Pallet::::storage_version(), None); - assert_eq!(AllModules::on_runtime_upgrade(), 61); + assert_eq!(AllPallets::on_runtime_upgrade(), 61); assert_eq!( pallet::Pallet::::storage_version(), Some(pallet::Pallet::::current_version()), @@ 
-499,27 +499,27 @@ fn pallet_hooks_expand() { // The order is indeed reversed due to https://github.com/paritytech/substrate/issues/6280 assert_eq!( - frame_system::Module::::events()[0].event, + frame_system::Pallet::::events()[0].event, Event::pallet_Instance1(pallet::Event::Something(11)), ); assert_eq!( - frame_system::Module::::events()[1].event, + frame_system::Pallet::::events()[1].event, Event::pallet(pallet::Event::Something(10)), ); assert_eq!( - frame_system::Module::::events()[2].event, + frame_system::Pallet::::events()[2].event, Event::pallet_Instance1(pallet::Event::Something(21)), ); assert_eq!( - frame_system::Module::::events()[3].event, + frame_system::Pallet::::events()[3].event, Event::pallet(pallet::Event::Something(20)), ); assert_eq!( - frame_system::Module::::events()[4].event, + frame_system::Pallet::::events()[4].event, Event::pallet_Instance1(pallet::Event::Something(31)), ); assert_eq!( - frame_system::Module::::events()[5].event, + frame_system::Pallet::::events()[5].event, Event::pallet(pallet::Event::Something(30)), ); }) diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index a2998788736ac..8a6ee8b8f5045 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -1,10 +1,10 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied - --> $DIR/genesis_default_not_satisfied.rs:22:18 - | -22 | impl GenesisBuild for GenesisConfig {} - | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` - | - ::: $WORKSPACE/frame/support/src/traits.rs - | - | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { - | ------- required by this bound in `GenesisBuild` + --> $DIR/genesis_default_not_satisfied.rs:22:18 + | +22 | impl GenesisBuild for 
GenesisConfig {} + | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` + | + ::: $WORKSPACE/frame/support/src/traits/hooks.rs + | + | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index 0379448f694fc..3812b433e20ca 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,5 +1,15 @@ -error[E0107]: wrong number of type arguments: expected 1, found 0 - --> $DIR/hooks_invalid_item.rs:12:18 - | -12 | impl Hooks for Pallet {} - | ^^^^^ expected 1 type argument +error[E0107]: missing generics for trait `Hooks` + --> $DIR/hooks_invalid_item.rs:12:18 + | +12 | impl Hooks for Pallet {} + | ^^^^^ expected 1 type argument + | +note: trait defined here, with 1 type parameter: `BlockNumber` + --> $DIR/hooks.rs:206:11 + | +206 | pub trait Hooks { + | ^^^^^ ----------- +help: use angle brackets to add missing type argument + | +12 | impl Hooks for Pallet {} + | ^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_version.rs b/frame/support/test/tests/pallet_version.rs index 4cc93d395db2a..b3436b7baed9a 100644 --- a/frame/support/test/tests/pallet_version.rs +++ b/frame/support/test/tests/pallet_version.rs @@ -174,15 +174,15 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Module1: module1::{Module, Call}, - Module2: module2::{Module, Call}, - Module2_1: module2::::{Module, Call}, - Module2_2: module2::::{Module, Call}, - Pallet3: pallet3::{Module, Call}, - Pallet4: pallet4::{Module, Call}, - Pallet4_1: pallet4::::{Module, Call}, - Pallet4_2: pallet4::::{Module, Call}, + System: frame_system::{Pallet, Call, Event}, + Module1: 
module1::{Pallet, Call}, + Module2: module2::{Pallet, Call}, + Module2_1: module2::::{Pallet, Call}, + Module2_2: module2::::{Pallet, Call}, + Pallet3: pallet3::{Pallet, Call}, + Pallet4: pallet4::{Pallet, Call}, + Pallet4_1: pallet4::::{Pallet, Call}, + Pallet4_2: pallet4::::{Pallet, Call}, } ); @@ -218,7 +218,7 @@ fn check_pallet_version(pallet: &str) { #[test] fn on_runtime_upgrade_sets_the_pallet_versions_in_storage() { sp_io::TestExternalities::new_empty().execute_with(|| { - AllModules::on_runtime_upgrade(); + AllPallets::on_runtime_upgrade(); check_pallet_version("Module1"); check_pallet_version("Module2"); @@ -237,7 +237,7 @@ fn on_runtime_upgrade_overwrites_old_version() { let key = get_pallet_version_storage_key_for_pallet("Module2"); sp_io::storage::set(&key, &SOME_TEST_VERSION.encode()); - AllModules::on_runtime_upgrade(); + AllPallets::on_runtime_upgrade(); check_pallet_version("Module1"); check_pallet_version("Module2"); diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index b09beb04cd17c..05cedbdb91a07 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -109,8 +109,8 @@ mod tests { NodeBlock = TestBlock, UncheckedExtrinsic = TestUncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - PalletTest: pallet_test::{Module, Call, Storage, Event, Config, ValidateUnsigned, Inherent}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + PalletTest: pallet_test::{Pallet, Call, Storage, Event, Config, ValidateUnsigned, Inherent}, } ); diff --git a/frame/system/README.md b/frame/system/README.md index 106a16bc209d6..a6da7c3816d22 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -64,8 +64,8 @@ decl_module! 
{ #[weight = 0] pub fn system_module_example(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; - let _extrinsic_count = >::extrinsic_count(); - let _parent_hash = >::parent_hash(); + let _extrinsic_count = >::extrinsic_count(); + let _parent_hash = >::parent_hash(); Ok(()) } } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 6ed3d456826c2..3ebee534a64e1 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -50,8 +50,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Module: module::{Module, Call, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Module: module::{Pallet, Call, Event}, } ); diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index bdb34e7944db5..7146bcd60645b 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -30,11 +30,11 @@ use frame_support::{ traits::Get, weights::DispatchClass, }; -use frame_system::{Module as System, Call, RawOrigin, DigestItemOf}; +use frame_system::{Pallet as System, Call, RawOrigin, DigestItemOf}; mod mock; -pub struct Module(System); +pub struct Pallet(System); pub trait Config: frame_system::Config {} benchmarks! { @@ -145,7 +145,7 @@ benchmarks! 
{ } impl_benchmark_test_suite!( - Module, + Pallet, crate::mock::new_test_ext(), crate::mock::Test, ); diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index edc5dfebbd106..23da1fee5617a 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -34,7 +34,7 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, } ); diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index de635b4fb91a6..aa6c1358790a4 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -16,7 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::{Config, Module}; +use crate::{Config, Pallet}; use sp_runtime::{ traits::{SignedExtension, Zero}, transaction_validity::TransactionValidityError, @@ -53,6 +53,6 @@ impl SignedExtension for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; fn additional_signed(&self) -> Result { - Ok(>::block_hash(T::BlockNumber::zero())) + Ok(>::block_hash(T::BlockNumber::zero())) } } diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 1e8eb32a3d3c2..b3e4c4ecfda86 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use codec::{Encode, Decode}; -use crate::{Config, Module, BlockHash}; +use crate::{Config, Pallet, BlockHash}; use sp_runtime::{ generic::Era, traits::{SignedExtension, DispatchInfoOf, SaturatedConversion}, @@ -62,7 +62,7 @@ impl SignedExtension for CheckMortality { _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); + let current_u64 = >::block_number().saturated_into::(); let valid_till = self.0.death(current_u64); Ok(ValidTransaction { longevity: valid_till.saturating_sub(current_u64), @@ -71,12 +71,12 @@ impl SignedExtension for CheckMortality { } fn additional_signed(&self) -> Result { - let current_u64 = >::block_number().saturated_into::(); + let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::(); if !>::contains_key(n) { Err(InvalidTransaction::AncientBirthBlock.into()) } else { - Ok(>::block_hash(n)) + Ok(>::block_hash(n)) } } } diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 1fd8376d342b2..e41ce1725a549 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{Config, Module}; +use crate::{Config, Pallet}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -53,6 +53,6 @@ impl SignedExtension for CheckSpecVersion { const IDENTIFIER: &'static str = "CheckSpecVersion"; fn additional_signed(&self) -> Result { - Ok(>::runtime_version().spec_version) + Ok(>::runtime_version().spec_version) } } diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index fa11a0a5727f1..ad23dc7e9dd05 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Config, Module}; +use crate::{Config, Pallet}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -53,6 +53,6 @@ impl SignedExtension for CheckTxVersion { const IDENTIFIER: &'static str = "CheckTxVersion"; fn additional_signed(&self) -> Result { - Ok(>::runtime_version().transaction_version) + Ok(>::runtime_version().transaction_version) } } diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 70116f4b6524b..fc9898b778b8d 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{limits::BlockWeights, Config, Module}; +use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable}, @@ -58,7 +58,7 @@ impl CheckWeight where info: &DispatchInfoOf, ) -> Result { let maximum_weight = T::BlockWeights::get(); - let all_weight = Module::::block_weight(); + let all_weight = Pallet::::block_weight(); calculate_consumed_weight::(maximum_weight, all_weight, info) } @@ -70,7 +70,7 @@ impl CheckWeight where len: usize, ) -> Result { let length_limit = T::BlockLength::get(); - let current_len = Module::::all_extrinsics_len(); + let current_len = Pallet::::all_extrinsics_len(); let added_len = len as u32; let next_len = current_len.saturating_add(added_len); if next_len > *length_limit.max.get(info.class) { diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index ce9ab0dddc105..9d3ecd6f41f5d 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1022,7 +1022,7 @@ pub enum IncRefError { NoProviders, } -impl Module { +impl Pallet { pub fn account_exists(who: &T::AccountId) -> bool { Account::::contains_key(who) } @@ -1083,7 +1083,7 @@ impl Module { (1, 0, 0) => { // No providers left (and no consumers) and no sufficients. Account dead. - Module::::on_killed_account(who.clone()); + Pallet::::on_killed_account(who.clone()); Ok(DecRefStatus::Reaped) } (1, c, _) if c > 0 => { @@ -1136,7 +1136,7 @@ impl Module { } match (account.sufficients, account.providers) { (0, 0) | (1, 0) => { - Module::::on_killed_account(who.clone()); + Pallet::::on_killed_account(who.clone()); DecRefStatus::Reaped } (x, _) => { @@ -1201,11 +1201,22 @@ impl Module { Account::::get(who).consumers } - /// True if the account has some outstanding references. + /// True if the account has some outstanding consumer references. 
pub fn is_provider_required(who: &T::AccountId) -> bool { Account::::get(who).consumers != 0 } + /// True if the account has no outstanding consumer references or more than one provider. + pub fn can_dec_provider(who: &T::AccountId) -> bool { + let a = Account::::get(who); + a.consumers == 0 || a.providers > 1 + } + + /// True if the account has at least one provider reference. + pub fn can_inc_consumer(who: &T::AccountId) -> bool { + Account::::get(who).providers > 0 + } + /// Deposits an event into this block's event record. pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); @@ -1449,7 +1460,12 @@ impl Module { match r { Ok(_) => Event::ExtrinsicSuccess(info), Err(err) => { - sp_runtime::print(err); + log::trace!( + target: "runtime::system", + "Extrinsic failed at block({:?}): {:?}", + Self::block_number(), + err, + ); Event::ExtrinsicFailed(err.error, info) }, } @@ -1515,11 +1531,11 @@ impl Module { pub struct Provider(PhantomData); impl HandleLifetime for Provider { fn created(t: &T::AccountId) -> Result<(), StoredMapError> { - Module::::inc_providers(t); + Pallet::::inc_providers(t); Ok(()) } fn killed(t: &T::AccountId) -> Result<(), StoredMapError> { - Module::::dec_providers(t) + Pallet::::dec_providers(t) .map(|_| ()) .or_else(|e| match e { DecRefError::ConsumerRemaining => Err(StoredMapError::ConsumerRemaining), @@ -1531,11 +1547,11 @@ impl HandleLifetime for Provider { pub struct SelfSufficient(PhantomData); impl HandleLifetime for SelfSufficient { fn created(t: &T::AccountId) -> Result<(), StoredMapError> { - Module::::inc_sufficients(t); + Pallet::::inc_sufficients(t); Ok(()) } fn killed(t: &T::AccountId) -> Result<(), StoredMapError> { - Module::::dec_sufficients(t); + Pallet::::dec_sufficients(t); Ok(()) } } @@ -1544,13 +1560,13 @@ impl HandleLifetime for SelfSufficient { pub struct Consumer(PhantomData); impl HandleLifetime for Consumer { fn created(t: &T::AccountId) -> Result<(), StoredMapError> { - 
Module::::inc_consumers(t) + Pallet::::inc_consumers(t) .map_err(|e| match e { IncRefError::NoProviders => StoredMapError::NoProviders }) } fn killed(t: &T::AccountId) -> Result<(), StoredMapError> { - Module::::dec_consumers(t); + Pallet::::dec_consumers(t); Ok(()) } } diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 2b31929b5da81..43c7d8d252774 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -33,7 +33,7 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, } ); diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index aa8bce966192e..fe601f995ce51 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -38,10 +38,10 @@ //! //! To be able to use signing, the following trait should be implemented: //! -//! - [`AppCrypto`](./trait.AppCrypto.html): where an application-specific key -//! is defined and can be used by this module's helpers for signing. -//! - [`CreateSignedTransaction`](./trait.CreateSignedTransaction.html): where -//! the manner in which the transaction is constructed is defined. +//! - [`AppCrypto`](./trait.AppCrypto.html): where an application-specific key is defined and can be +//! used by this module's helpers for signing. +//! - [`CreateSignedTransaction`](./trait.CreateSignedTransaction.html): where the manner in which +//! the transaction is constructed is defined. //! //! #### Submit an unsigned transaction with a signed payload //! @@ -53,7 +53,6 @@ //! #### Submit a signed transaction //! //! [`Signer`](./struct.Signer.html) can be used to sign/verify payloads -//! #![warn(missing_docs)] @@ -473,7 +472,7 @@ pub trait SendTransactionTypes { /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. 
- type OverarchingCall: From; + type OverarchingCall: From + codec::Encode; } /// Create signed transaction. diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index b3e8eca889cb0..d64fa8dc691c7 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_system::RawOrigin; use frame_support::{ensure, traits::OnFinalize}; use frame_benchmarking::{benchmarks, TrackedStorageKey, impl_benchmark_test_suite}; -use crate::Module as Timestamp; +use crate::Pallet as Timestamp; const MAX_TIME: u32 = 100; diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 2ef24a696ade9..d467551196850 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -19,9 +19,9 @@ //! //! The Timestamp pallet provides functionality to get and set the on-chain time. //! -//! - [`timestamp::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Pallet`](./struct.Pallet.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! @@ -319,8 +319,8 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, } ); diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 442df89428fcc..6d85df33f10c9 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -291,7 +291,8 @@ decl_module! { Reasons::::remove(&tip.reason); Tips::::remove(&hash); if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&who, tip.deposit); + let err_amount = T::Currency::unreserve(&who, tip.deposit); + debug_assert!(err_amount.is_zero()); } Self::deposit_event(RawEvent::TipRetracted(hash)); } @@ -401,7 +402,7 @@ decl_module! 
{ let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); + ensure!(system::Pallet::::block_number() >= *n, Error::::Premature); // closed. Reasons::::remove(&tip.reason); Tips::::remove(hash); @@ -463,7 +464,7 @@ impl Module { Self::retain_active_tips(&mut tip.tips); let threshold = (T::Tippers::count() + 1) / 2; if tip.tips.len() >= threshold && tip.closes.is_none() { - tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); + tip.closes = Some(system::Pallet::::block_number() + T::TipCountdown::get()); true } else { false @@ -505,7 +506,8 @@ impl Module { let mut payout = tips[tips.len() / 2].1.min(max_payout); if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&tip.finder, tip.deposit); + let err_amount = T::Currency::unreserve(&tip.finder, tip.deposit); + debug_assert!(err_amount.is_zero()); } if tip.finders_fee && tip.finder != tip.who { @@ -514,11 +516,13 @@ impl Module { payout -= finders_fee; // this should go through given we checked it's at most the free balance, but still // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); + let res = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); + debug_assert!(res.is_ok()); } // same as above: best-effort only. 
- let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + let res = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + debug_assert!(res.is_ok()); Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); } diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index 413e2dd9437e2..ef30962fc846f 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -40,10 +40,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, - TipsModTestInst: tips::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + TipsModTestInst: tips::{Pallet, Call, Storage, Event}, } ); @@ -125,7 +125,7 @@ parameter_types! { } impl pallet_treasury::Config for Test { type ModuleId = TreasuryModuleId; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type Event = Event; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 709a8f69a487d..b2dc2c9859e0b 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -178,7 +178,7 @@ impl Convert for TargetedFeeAdjustment>::block_weight(); + let current_block_weight = >::block_weight(); let normal_block_weight = *current_block_weight .get(DispatchClass::Normal) .min(&normal_max_weight); @@ -303,7 +303,7 @@ decl_module! 
{ target += addition; sp_io::TestExternalities::new_empty().execute_with(|| { - >::set_block_consumed_resources(target, 0); + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); assert!(next > min_value, "The minimum bound of the multiplier is too low. When \ block saturation is more than target by 1% and multiplier is minimal then \ @@ -630,9 +630,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Module, Storage}, + System: system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } ); diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index f84b19d78c297..7292ef4dfee7e 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -117,6 +117,7 @@ where // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid = paid .offset(refund_imbalance) + .same() .map_err(|_| TransactionValidityError::Invalid(InvalidTransaction::Payment))?; // Call someone else to handle the imbalance (fee and tip separately) let imbalances = adjusted_paid.split(tip); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index b5e2c7881bb5f..cef50706b5173 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -20,8 +20,8 @@ //! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the system //! and a structure for making spending proposals from this pot. //! -//! - [`treasury::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -53,7 +53,7 @@ //! //! 
## GenesisConfig //! -//! The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Treasury module depends on the [`GenesisConfig`]. #![cfg_attr(not(feature = "std"), no_std)] @@ -377,7 +377,8 @@ impl, I: Instance> Module { >::remove(index); // return their deposit. - let _ = T::Currency::unreserve(&p.proposer, p.bond); + let err_amount = T::Currency::unreserve(&p.proposer, p.bond); + debug_assert!(err_amount.is_zero()); // provide the allocation. imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 3c70099843ea8..45fc3e629fb0b 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -43,9 +43,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Treasury: treasury::{Module, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Treasury: treasury::{Pallet, Call, Storage, Config, Event}, } ); @@ -105,7 +105,7 @@ parameter_types! 
{ } impl Config for Test { type ModuleId = TreasuryModuleId; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type Event = Event; diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 79fb569c77a5c..de7f48d625c54 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark const SEED: u32 = 0; fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); + let events = frame_system::Pallet::::events(); let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; @@ -70,7 +70,7 @@ benchmarks! { } impl_benchmark_test_suite!( - Module, + Pallet, crate::tests::new_test_ext(), crate::tests::Test, ); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 28345e5ffe72d..983d24c74dbee 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Utility Module -//! A stateless module with helpers for dispatch management which does no re-authentication. +//! # Utility Pallet +//! A stateless pallet with helpers for dispatch management which does no re-authentication. //! -//! - [`utility::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! This module contains two basic pieces of functionality: +//! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with //! 
corresponding `set_storage`s, for efficient multiple payouts with just a single signature @@ -34,9 +34,9 @@ //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where //! it's perfectly fine to have each of them controlled by the same underlying keypair. //! Derivative accounts are, for the purposes of proxy filtering considered exactly the same as -//! the oigin and are thus hampered with the origin's filters. +//! the origin and are thus hampered with the origin's filters. //! -//! Since proxy filters are respected in all dispatches of this module, it should never need to be +//! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface @@ -48,9 +48,6 @@ //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. -//! -//! [`Call`]: ./enum.Call.html -//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -63,36 +60,45 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use frame_support::{decl_module, decl_event, decl_storage, Parameter, transactional}; use frame_support::{ - traits::{OriginTrait, UnfilteredDispatchable, Get}, - weights::{Weight, GetDispatchInfo, DispatchClass, extract_actual_weight}, - dispatch::{PostDispatchInfo, DispatchResultWithPostInfo}, + transactional, + traits::{OriginTrait, UnfilteredDispatchable}, + weights::{GetDispatchInfo, extract_actual_weight}, + dispatch::PostDispatchInfo, }; -use frame_system::{ensure_signed, ensure_root}; -use sp_runtime::{DispatchError, traits::Dispatchable}; +use sp_runtime::traits::Dispatchable; pub use weights::WeightInfo; -/// Configuration trait. -pub trait Config: frame_system::Config { - /// The overarching event type. 
- type Event: From + Into<::Event>; +pub use pallet::*; - /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From> - + UnfilteredDispatchable; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); -decl_storage! { - trait Store for Module as Utility {} -} -decl_event! { - /// Events type. + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From + IsType<::Event>; + + /// The overarching call type. + type Call: Parameter + Dispatchable + + GetDispatchInfo + From> + + UnfilteredDispatchable; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. \[index, error\] @@ -100,21 +106,12 @@ decl_event! { /// Batch of dispatches completed fully with no error. BatchCompleted, } -} -/// A module identifier. These are per module and should be stored in a registry somewhere. -#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] -struct IndexedUtilityModuleId(u16); - -impl TypeId for IndexedUtilityModuleId { - const TYPE_ID: [u8; 4] = *b"suba"; -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet {} + #[pallet::call] + impl Pallet { /// Send a batch of dispatch calls. /// /// May be called from any origin. @@ -133,7 +130,7 @@ decl_module! 
{ /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. - #[weight = { + #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) @@ -150,8 +147,11 @@ decl_module! { } }; (dispatch_weight, dispatch_class) - }] - fn batch(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + })] + pub fn batch( + origin: OriginFor, + calls: Vec<::Call>, + ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -192,7 +192,7 @@ decl_module! { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. - #[weight = { + #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() @@ -201,8 +201,12 @@ decl_module! { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, ) - }] - fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResultWithPostInfo { + })] + pub fn as_derivative( + origin: OriginFor, + index: u16, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); @@ -232,7 +236,7 @@ decl_module! { /// # /// - Complexity: O(C) where C is the number of calls to be batched. /// # - #[weight = { + #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) @@ -249,9 +253,12 @@ decl_module! 
{ } }; (dispatch_weight, dispatch_class) - }] + })] #[transactional] - fn batch_all(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + pub fn batch_all( + origin: OriginFor, + calls: Vec<::Call>, + ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -279,9 +286,18 @@ decl_module! { Ok(Some(base_weight + weight).into()) } } + +} + +/// A pallet identifier. These are per pallet and should be stored in a registry somewhere. +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] +struct IndexedUtilityPalletId(u16); + +impl TypeId for IndexedUtilityPalletId { + const TYPE_ID: [u8; 4] = *b"suba"; } -impl Module { +impl Pallet { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index af31bbe96cbc4..3a8089519fac5 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -22,7 +22,7 @@ use super::*; use frame_support::{ - assert_ok, assert_noop, parameter_types, assert_err_ignore_postinfo, + assert_ok, assert_noop, parameter_types, assert_err_ignore_postinfo, decl_module, weights::{Weight, Pays}, dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, traits::Filter, @@ -35,7 +35,8 @@ use crate as utility; // example module to test behaviors. pub mod example { use super::*; - use frame_support::dispatch::WithPostDispatchInfo; + use frame_system::ensure_signed; + use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; pub trait Config: frame_system::Config { } decl_module! 
{ @@ -75,10 +76,10 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Utility: utility::{Module, Call, Event}, - Example: example::{Module, Call}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Utility: utility::{Pallet, Call, Event}, + Example: example::{Pallet, Call}, } ); @@ -170,7 +171,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } fn last_event() -> Event { - frame_system::Module::::events().pop().map(|e| e.event).expect("Event expected") + frame_system::Pallet::::events().pop().map(|e| e.event).expect("Event expected") } fn expect_event>(e: E) { diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 937f2b033d847..8d16a53fba2c1 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -21,11 +21,11 @@ use super::*; -use frame_system::{RawOrigin, Module as System}; +use frame_system::{RawOrigin, Pallet as System}; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Module as Vesting; +use crate::Pallet as Vesting; const SEED: u32 = 0; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 9cf9166b37c0c..c02e9dc78c13e 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Vesting Module +//! # Vesting Pallet //! -//! - [`vesting::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! A simple module providing a means of placing a linear curve on an account's locked balance. This -//! 
module ensures that there is a lock in place preventing the balance to drop below the *unvested* +//! A simple pallet providing a means of placing a linear curve on an account's locked balance. This +//! pallet ensures that there is a lock in place preventing the balance to drop below the *unvested* //! amount for any reason other than transaction fee payment. //! //! As the amount vested increases over time, the amount unvested reduces. However, locks remain in @@ -34,16 +34,13 @@ //! //! ## Interface //! -//! This module implements the `VestingSchedule` trait. +//! This pallet implements the `VestingSchedule` trait. //! //! ### Dispatchable Functions //! //! - `vest` - Update the lock, reducing it in line with the amount "vested" so far. //! - `vest_other` - Update the lock of another account, reducing it in line with the amount //! "vested" so far. -//! -//! [`Call`]: ./enum.Call.html -//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -53,37 +50,21 @@ pub mod weights; use sp_std::prelude::*; use sp_std::fmt::Debug; use codec::{Encode, Decode}; -use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ +use sp_runtime::{RuntimeDebug, traits::{ StaticLookup, Zero, AtLeast32BitUnsigned, MaybeSerializeDeserialize, Convert }}; -use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; +use frame_support::{ensure, pallet_prelude::*}; use frame_support::traits::{ Currency, LockableCurrency, VestingSchedule, WithdrawReasons, LockIdentifier, ExistenceRequirement, Get, }; -use frame_system::{ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root, pallet_prelude::*}; pub use weights::WeightInfo; +pub use pallet::*; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The currency trait. 
- type Currency: LockableCurrency; - - /// Convert the block number into a balance. - type BlockNumberToBalance: Convert>; - - /// The minimum amount transferred to call `vested_transfer`. - type MinVestedTransfer: Get>; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} - const VESTING_ID: LockIdentifier = *b"vesting "; /// Struct to encode the vesting schedule of an individual account. @@ -119,23 +100,68 @@ impl< } } -decl_storage! { - trait Store for Module as Vesting { - /// Information regarding the vesting of a given account. - pub Vesting get(fn vesting): - map hasher(blake2_128_concat) T::AccountId - => Option, T::BlockNumber>>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The currency trait. + type Currency: LockableCurrency; + + /// Convert the block number into a balance. + type BlockNumberToBalance: Convert>; + + /// The minimum amount transferred to call `vested_transfer`. + #[pallet::constant] + type MinVestedTransfer: Get>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } - add_extra_genesis { - config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>; - build(|config: &GenesisConfig| { + + /// Information regarding the vesting of a given account. 
+ #[pallet::storage] + #[pallet::getter(fn vesting)] + pub type Vesting = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + VestingInfo, T::BlockNumber>, + >; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub vesting: Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + vesting: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { use sp_runtime::traits::Saturating; + // Generate initial vesting configuration // * who - Account which we are generating vesting configuration for // * begin - Block when the account will start to vest // * length - Number of blocks from `begin` until fully vested // * liquid - Number of units which can be spent before vesting begins - for &(ref who, begin, length, liquid) in config.vesting.iter() { + for &(ref who, begin, length, liquid) in self.vesting.iter() { let balance = T::Currency::free_balance(who); assert!(!balance.is_zero(), "Currencies must be init'd before vesting"); // Total genesis `balance` minus `liquid` equals funds locked for vesting @@ -151,24 +177,24 @@ decl_storage! { let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, locked, reasons); } - }) + } } -} -decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", BalanceOf = "Balance")] + pub enum Event { /// The amount vested has been updated. This could indicate more funds are available. The /// balance given is the amount which is left unvested (and thus locked). 
/// \[account, unvested\] - VestingUpdated(AccountId, Balance), + VestingUpdated(T::AccountId, BalanceOf), /// An \[account\] has become fully vested. No further vesting can happen. - VestingCompleted(AccountId), + VestingCompleted(T::AccountId), } -); -decl_error! { - /// Error for the vesting module. - pub enum Error for Module { + /// Error for the vesting pallet. + #[pallet::error] + pub enum Error { /// The account given is not vesting. NotVesting, /// An existing vesting schedule already exists for this account that cannot be clobbered. @@ -176,22 +202,16 @@ decl_error! { /// Amount being transferred is too low to create a vesting schedule. AmountLow, } -} - -decl_module! { - /// Vesting module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount to be transferred to create a new vesting schedule. - const MinVestedTransfer: BalanceOf = T::MinVestedTransfer::get(); - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet {} + #[pallet::call] + impl Pallet { /// Unlock any vested funds of the sender account. /// /// The dispatch origin for this call must be _Signed_ and the sender must have funds still - /// locked under this module. + /// locked under this pallet. /// /// Emits either `VestingCompleted` or `VestingUpdated`. /// @@ -201,10 +221,10 @@ decl_module! { /// - Reads: Vesting Storage, Balances Locks, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, [Sender Account] /// # - #[weight = T::WeightInfo::vest_locked(MaxLocksOf::::get()) + #[pallet::weight(T::WeightInfo::vest_locked(MaxLocksOf::::get()) .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get())) - ] - fn vest(origin) -> DispatchResult { + )] + pub fn vest(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; Self::update_lock(who) } @@ -214,7 +234,7 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. 
/// /// - `target`: The account whose vested funds should be unlocked. Must have funds still - /// locked under this module. + /// locked under this pallet. /// /// Emits either `VestingCompleted` or `VestingUpdated`. /// @@ -224,10 +244,10 @@ decl_module! { /// - Reads: Vesting Storage, Balances Locks, Target Account /// - Writes: Vesting Storage, Balances Locks, Target Account /// # - #[weight = T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) + #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get())) - ] - fn vest_other(origin, target: ::Source) -> DispatchResult { + )] + pub fn vest_other(origin: OriginFor, target: ::Source) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(T::Lookup::lookup(target)?) } @@ -248,9 +268,9 @@ decl_module! { /// - Reads: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// # - #[weight = T::WeightInfo::vested_transfer(MaxLocksOf::::get())] + #[pallet::weight(T::WeightInfo::vested_transfer(MaxLocksOf::::get()))] pub fn vested_transfer( - origin, + origin: OriginFor, target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { @@ -285,9 +305,9 @@ decl_module! { /// - Reads: Vesting Storage, Balances Locks, Target Account, Source Account /// - Writes: Vesting Storage, Balances Locks, Target Account, Source Account /// # - #[weight = T::WeightInfo::force_vested_transfer(MaxLocksOf::::get())] + #[pallet::weight(T::WeightInfo::force_vested_transfer(MaxLocksOf::::get()))] pub fn force_vested_transfer( - origin, + origin: OriginFor, source: ::Source, target: ::Source, schedule: VestingInfo, T::BlockNumber>, @@ -309,28 +329,28 @@ decl_module! 
{ } } -impl Module { - /// (Re)set or remove the module's currency lock on `who`'s account in accordance with their +impl Pallet { + /// (Re)set or remove the pallet's currency lock on `who`'s account in accordance with their /// current unvested amount. fn update_lock(who: T::AccountId) -> DispatchResult { let vesting = Self::vesting(&who).ok_or(Error::::NotVesting)?; - let now = >::block_number(); + let now = >::block_number(); let locked_now = vesting.locked_at::(now); if locked_now.is_zero() { T::Currency::remove_lock(VESTING_ID, &who); Vesting::::remove(&who); - Self::deposit_event(RawEvent::VestingCompleted(who)); + Self::deposit_event(Event::::VestingCompleted(who)); } else { let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, &who, locked_now, reasons); - Self::deposit_event(RawEvent::VestingUpdated(who, locked_now)); + Self::deposit_event(Event::::VestingUpdated(who, locked_now)); } Ok(()) } } -impl VestingSchedule for Module where +impl VestingSchedule for Pallet where BalanceOf: MaybeSerializeDeserialize + Debug { type Moment = T::BlockNumber; @@ -339,7 +359,7 @@ impl VestingSchedule for Module where /// Get the amount that is currently being vested and cannot be transferred out of this account. fn vesting_balance(who: &T::AccountId) -> Option> { if let Some(v) = Self::vesting(who) { - let now = >::block_number(); + let now = >::block_number(); let locked_now = v.locked_at::(now); Some(T::Currency::free_balance(who).min(locked_now)) } else { @@ -374,7 +394,8 @@ impl VestingSchedule for Module where }; Vesting::::insert(who, vesting_schedule); // it can't fail, but even if somehow it did, we don't really care. 
- let _ = Self::update_lock(who.clone()); + let res = Self::update_lock(who.clone()); + debug_assert!(res.is_ok()); Ok(()) } @@ -382,7 +403,8 @@ impl VestingSchedule for Module where fn remove_vesting_schedule(who: &T::AccountId) { Vesting::::remove(who); // it can't fail, but even if somehow it did, we don't really care. - let _ = Self::update_lock(who.clone()); + let res = Self::update_lock(who.clone()); + debug_assert!(res.is_ok()); } } @@ -408,9 +430,9 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, } ); diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index ff172b8bd2704..47ba5a4803056 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -38,75 +38,75 @@ fn main() { // peru16 let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / 
bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); // percent let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); // perbill let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); + let ratio = Perbill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perbill::from_float(smaller as f64 / bigger.max(1) as f64), 100, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), 
u64_pair.0.max(u64_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); + let ratio = Perbill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perbill::from_float(smaller as f64 / bigger.max(1) as f64), 100, ); // perquintillion let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Perquintill::from_rational_approximation(smaller, bigger); + let ratio = Perquintill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perquintill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perquintill::from_float(smaller as f64 / bigger.max(1) as f64), 1000, ); diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 896d5f38451de..b837c360c7c54 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -376,7 +376,7 @@ macro_rules! implement_fixed { } #[cfg(any(feature = "std", test))] - pub fn from_fraction(x: f64) -> Self { + pub fn from_float(x: f64) -> Self { Self((x * (::DIV as f64)) as $inner_type) } diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 561c14a37e203..d6069ad5154d1 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -494,7 +494,7 @@ mod threshold_compare_tests { fn peru16_rational_does_not_overflow() { // A historical example that will panic only for per_thing type that are created with // maximum capacity of their type, e.g. PerU16. 
- let _ = PerU16::from_rational_approximation(17424870u32, 17424870); + let _ = PerU16::from_rational(17424870u32, 17424870); } #[test] diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index f2b8c4f93b33f..29d5d2be73a1c 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -18,8 +18,9 @@ #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -use sp_std::{ops, fmt, prelude::*, convert::TryInto}; +use sp_std::{ops, fmt, prelude::*, convert::{TryFrom, TryInto}}; use codec::{Encode, CompactAs}; +use num_traits::Pow; use crate::traits::{ SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, One, @@ -36,6 +37,7 @@ pub type UpperOf